/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

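/* IH client IDs indexed by UVD instance; the second entry is only used on
 * Vega20, the only ASIC handled here with two UVD instances.
 */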
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

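	/* A VF must not touch UVD registers directly (see uvd_v7_0_hw_fini),
	 * so the register-level ring test is skipped under SR-IOV.
	 */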
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object to back the msg
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

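	/* Message layout, as labelled below: each block starts with its size
	 * in bytes, followed by a type dword - 1 for session info, 2 for
	 * task info, 0x08xxxxxx for an operation.
	 */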
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object to back the msg
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only the first encoding ring is
				 * used under SR-IOV, so point the unused rings
				 * at an unused doorbell location.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

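	/* The MM table feeds the MMSCH under SR-IOV; on bare metal this
	 * allocation is expected to be a no-op.
	 */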
	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

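	/* Each instance's VCPU address space is carved into three windows:
	 * cache0 holds the firmware image, cache1 the heap, and cache2 the
	 * stack plus per-session state (see the sizes programmed below).
	 */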
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

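	/* poll for the MMSCH ack; 1000 iterations of 10us gives a ~10ms timeout */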
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

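	/* The UVD init table is appended to the shared MMSCH table only once;
	 * all offsets and sizes in the header are in dwords. If the table is
	 * already populated, skip straight to kicking off the MMSCH below.
	 */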
	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				/* XXX: only instance 0's cache offset register is written here */
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

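		/* Up to ten attempts: each polls UVD_STATUS for ~1s
		 * (100 x 10 ms) and soft-resets the VCPU between attempts.
		 */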
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0); /* fence command */

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2); /* trap command */
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		/* rebase the register offset, written against instance 0,
		 * onto this instance's register space
		 */
		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}


/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	/* NOPs are emitted as two-dword register writes, so both the write
	 * pointer and the count must stay even
	 */
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

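/* The idle and soft-reset helpers below are compiled out; note that they
 * reference a 'ring' variable that is not in scope, so they would need
 * reworking before being enabled.
 */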
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124: /* UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119: /* UVD_7_0__SRCID__UVD_ENC_GEN_PURP */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120: /* UVD_7_0__SRCID__UVD_ENC_GEN_PURP + 1 */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}


#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}


static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
1705		/* disable HW gating and enable SW gating */
1706		uvd_v7_0_set_sw_clock_gating(adev);
1707	} else {
1708		/* wait for STATUS to clear */
1709		if (uvd_v7_0_wait_for_idle(handle))
1710			return -EBUSY;
1711
1712		/* enable HW gates because UVD is idle */
1713		/* uvd_v7_0_set_hw_clock_gating(adev); */
1714	}
1715
1716	return 0;
1717}
1718
1719static int uvd_v7_0_set_powergating_state(void *handle,
1720					  enum amd_powergating_state state)
1721{
1722	/* This doesn't actually powergate the UVD block.
1723	 * That's done in the dpm code via the SMC.  This
1724	 * just re-inits the block as necessary.  The actual
1725	 * gating still happens in the dpm code.  We should
1726	 * revisit this when there is a cleaner line between
1727	 * the smc and the hw blocks
1728	 */
1729	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730
1731	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1732		return 0;
1733
	/* FIXME: the original used ring->me with no 'ring' in scope; assume
	 * the first instance in this compiled-out path.
	 */
1734	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1735
1736	if (state == AMD_PG_STATE_GATE) {
1737		uvd_v7_0_stop(adev);
1738		return 0;
1739	} else {
1740		return uvd_v7_0_start(adev);
1741	}
1742}
1743#endif
1744
1745static int uvd_v7_0_set_clockgating_state(void *handle,
1746					  enum amd_clockgating_state state)
1747{
1748	/* nothing to do here; the full implementation above is compiled out,
	 * but a stub is still needed for driver unload */
1749	return 0;
1750}
1751
1752const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1753	.name = "uvd_v7_0",
1754	.early_init = uvd_v7_0_early_init,
1755	.late_init = NULL,
1756	.sw_init = uvd_v7_0_sw_init,
1757	.sw_fini = uvd_v7_0_sw_fini,
1758	.hw_init = uvd_v7_0_hw_init,
1759	.hw_fini = uvd_v7_0_hw_fini,
1760	.suspend = uvd_v7_0_suspend,
1761	.resume = uvd_v7_0_resume,
1762	.is_idle = NULL /* uvd_v7_0_is_idle */,
1763	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1764	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1765	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1766	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1767	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1768	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1769	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1770};
1771
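/*
 * The .emit_frame_size totals below are the worst-case number of ring
 * dwords the emit helpers may write per submission, so the ring code can
 * reserve enough space up front.
 */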
1772static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1773	.type = AMDGPU_RING_TYPE_UVD,
1774	.align_mask = 0xf,
1775	.support_64bit_ptrs = false,
1776	.no_user_fence = true,
1777	.vmhub = AMDGPU_MMHUB_0,
1778	.get_rptr = uvd_v7_0_ring_get_rptr,
1779	.get_wptr = uvd_v7_0_ring_get_wptr,
1780	.set_wptr = uvd_v7_0_ring_set_wptr,
1781	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1782	.emit_frame_size =
1783		6 + /* hdp invalidate */
1784		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1785		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1786		8 + /* uvd_v7_0_ring_emit_vm_flush */
1787		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1788	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1789	.emit_ib = uvd_v7_0_ring_emit_ib,
1790	.emit_fence = uvd_v7_0_ring_emit_fence,
1791	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1792	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1793	.test_ring = uvd_v7_0_ring_test_ring,
1794	.test_ib = amdgpu_uvd_ring_test_ib,
1795	.insert_nop = uvd_v7_0_ring_insert_nop,
1796	.pad_ib = amdgpu_ring_generic_pad_ib,
1797	.begin_use = amdgpu_uvd_ring_begin_use,
1798	.end_use = amdgpu_uvd_ring_end_use,
1799	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1800	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1801	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1802};
1803
1804static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1805	.type = AMDGPU_RING_TYPE_UVD_ENC,
1806	.align_mask = 0x3f,
1807	.nop = HEVC_ENC_CMD_NO_OP,
1808	.support_64bit_ptrs = false,
1809	.no_user_fence = true,
1810	.vmhub = AMDGPU_MMHUB_0,
1811	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1812	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1813	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1814	.emit_frame_size =
1815		3 + 3 + /* hdp flush / invalidate */
1816		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1817		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1818		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1819		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1820		1, /* uvd_v7_0_enc_ring_insert_end */
1821	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1822	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1823	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1824	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1825	.test_ring = uvd_v7_0_enc_ring_test_ring,
1826	.test_ib = uvd_v7_0_enc_ring_test_ib,
1827	.insert_nop = amdgpu_ring_insert_nop,
1828	.insert_end = uvd_v7_0_enc_ring_insert_end,
1829	.pad_ib = amdgpu_ring_generic_pad_ib,
1830	.begin_use = amdgpu_uvd_ring_begin_use,
1831	.end_use = amdgpu_uvd_ring_end_use,
1832	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1833	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1834	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1835};
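
/* Encode rings pad with HEVC_ENC_CMD_NO_OP (.nop above) rather than
 * register-write packets, which is why the generic
 * amdgpu_ring_insert_nop is usable here.
 */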
1836
1837static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1838{
1839	int i;
1840
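	/* harvest_config bit i set means UVD instance i is fused off */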
1841	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1842		if (adev->uvd.harvest_config & (1 << i))
1843			continue;
1844		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1845		adev->uvd.inst[i].ring.me = i;
1846		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1847	}
1848}
1849
1850static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1851{
1852	int i, j;
1853
1854	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1855		if (adev->uvd.harvest_config & (1 << j))
1856			continue;
1857		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1858			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1859			adev->uvd.inst[j].ring_enc[i].me = j;
1860		}
1861
1862		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1863	}
1864}
1865
1866static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1867	.set = uvd_v7_0_set_interrupt_state,
1868	.process = uvd_v7_0_process_interrupt,
1869};
1870
1871static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1872{
1873	int i;
1874
1875	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1876		if (adev->uvd.harvest_config & (1 << i))
1877			continue;
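		/* one interrupt type per ring: the decode ring plus each enc ring */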
1878		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1879		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1880	}
1881}
1882
1883const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1884{
1885		.type = AMD_IP_BLOCK_TYPE_UVD,
1886		.major = 7,
1887		.minor = 0,
1888		.rev = 0,
1889		.funcs = &uvd_v7_0_ip_funcs,
1890};
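
/*
 * The block above is registered by the SoC setup code (see soc15.c) via
 * amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block).
 */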
v5.14.15
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_uvd.h"
  28#include "soc15.h"
  29#include "soc15d.h"
  30#include "soc15_common.h"
  31#include "mmsch_v1_0.h"
  32
  33#include "uvd/uvd_7_0_offset.h"
  34#include "uvd/uvd_7_0_sh_mask.h"
  35#include "vce/vce_4_0_offset.h"
  36#include "vce/vce_4_0_default.h"
  37#include "vce/vce_4_0_sh_mask.h"
  38#include "nbif/nbif_6_1_offset.h"
 
  39#include "mmhub/mmhub_1_0_offset.h"
  40#include "mmhub/mmhub_1_0_sh_mask.h"
  41#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
  42
  43#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
  44#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
  45//UVD_PG0_CC_UVD_HARVESTING
  46#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
  47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
  48
  49#define UVD7_MAX_HW_INSTANCES_VEGA20			2
  50
  51static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  52static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  53static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  54static int uvd_v7_0_start(struct amdgpu_device *adev);
  55static void uvd_v7_0_stop(struct amdgpu_device *adev);
  56static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
  57
  58static int amdgpu_ih_clientid_uvds[] = {
  59	SOC15_IH_CLIENTID_UVD,
  60	SOC15_IH_CLIENTID_UVD1
  61};
  62
  63/**
  64 * uvd_v7_0_ring_get_rptr - get read pointer
  65 *
  66 * @ring: amdgpu_ring pointer
  67 *
  68 * Returns the current hardware read pointer
  69 */
  70static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
  71{
  72	struct amdgpu_device *adev = ring->adev;
  73
  74	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
  75}
  76
  77/**
  78 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
  79 *
  80 * @ring: amdgpu_ring pointer
  81 *
  82 * Returns the current hardware enc read pointer
  83 */
  84static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
  85{
  86	struct amdgpu_device *adev = ring->adev;
  87
  88	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
  89		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
  90	else
  91		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
  92}
  93
  94/**
  95 * uvd_v7_0_ring_get_wptr - get write pointer
  96 *
  97 * @ring: amdgpu_ring pointer
  98 *
  99 * Returns the current hardware write pointer
 100 */
 101static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 102{
 103	struct amdgpu_device *adev = ring->adev;
 104
 105	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
 106}
 107
 108/**
 109 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 110 *
 111 * @ring: amdgpu_ring pointer
 112 *
 113 * Returns the current hardware enc write pointer
 114 */
 115static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 116{
 117	struct amdgpu_device *adev = ring->adev;
 118
 119	if (ring->use_doorbell)
 120		return adev->wb.wb[ring->wptr_offs];
 121
 122	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 123		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
 124	else
 125		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
 126}
 127
 128/**
 129 * uvd_v7_0_ring_set_wptr - set write pointer
 130 *
 131 * @ring: amdgpu_ring pointer
 132 *
 133 * Commits the write pointer to the hardware
 134 */
 135static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 136{
 137	struct amdgpu_device *adev = ring->adev;
 138
 139	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 140}
 141
 142/**
 143 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 144 *
 145 * @ring: amdgpu_ring pointer
 146 *
 147 * Commits the enc write pointer to the hardware
 148 */
 149static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 150{
 151	struct amdgpu_device *adev = ring->adev;
 152
 153	if (ring->use_doorbell) {
 154		/* XXX check if swapping is necessary on BE */
 155		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 156		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 157		return;
 158	}
 159
 160	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 161		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
 162			lower_32_bits(ring->wptr));
 163	else
 164		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
 165			lower_32_bits(ring->wptr));
 166}
 167
 168/**
 169 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 170 *
 171 * @ring: the engine to test on
 172 *
 173 */
 174static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 175{
 176	struct amdgpu_device *adev = ring->adev;
 177	uint32_t rptr;
 178	unsigned i;
 179	int r;
 180
 181	if (amdgpu_sriov_vf(adev))
 182		return 0;
 183
 184	r = amdgpu_ring_alloc(ring, 16);
 185	if (r)
 186		return r;
 187
 188	rptr = amdgpu_ring_get_rptr(ring);
 189
 190	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 191	amdgpu_ring_commit(ring);
 192
 193	for (i = 0; i < adev->usec_timeout; i++) {
 194		if (amdgpu_ring_get_rptr(ring) != rptr)
 195			break;
 196		udelay(1);
 197	}
 198
 199	if (i >= adev->usec_timeout)
 200		r = -ETIMEDOUT;
 201
 202	return r;
 203}
 204
 205/**
 206 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 207 *
 
 208 * @ring: ring we should submit the msg to
 209 * @handle: session handle to use
 210 * @bo: amdgpu object for which we query the offset
 211 * @fence: optional fence to return
 212 *
 213 * Open up a stream for HW test
 214 */
 215static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 216				       struct amdgpu_bo *bo,
 217				       struct dma_fence **fence)
 218{
 219	const unsigned ib_size_dw = 16;
 220	struct amdgpu_job *job;
 221	struct amdgpu_ib *ib;
 222	struct dma_fence *f = NULL;
 223	uint64_t addr;
 224	int i, r;
 225
 226	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 227					AMDGPU_IB_POOL_DIRECT, &job);
 228	if (r)
 229		return r;
 230
 231	ib = &job->ibs[0];
 232	addr = amdgpu_bo_gpu_offset(bo);
 233
 234	ib->length_dw = 0;
 235	ib->ptr[ib->length_dw++] = 0x00000018;
 236	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 237	ib->ptr[ib->length_dw++] = handle;
 238	ib->ptr[ib->length_dw++] = 0x00000000;
 239	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 240	ib->ptr[ib->length_dw++] = addr;
 241
 242	ib->ptr[ib->length_dw++] = 0x00000014;
 243	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 244	ib->ptr[ib->length_dw++] = 0x0000001c;
 245	ib->ptr[ib->length_dw++] = 0x00000000;
 246	ib->ptr[ib->length_dw++] = 0x00000000;
 247
 248	ib->ptr[ib->length_dw++] = 0x00000008;
 249	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 250
 251	for (i = ib->length_dw; i < ib_size_dw; ++i)
 252		ib->ptr[i] = 0x0;
 253
 254	r = amdgpu_job_submit_direct(job, ring, &f);
 255	if (r)
 256		goto err;
 257
 258	if (fence)
 259		*fence = dma_fence_get(f);
 260	dma_fence_put(f);
 261	return 0;
 262
 263err:
 264	amdgpu_job_free(job);
 265	return r;
 266}
 267
 268/**
 269 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 270 *
 
 271 * @ring: ring we should submit the msg to
 272 * @handle: session handle to use
 273 * @bo: amdgpu object for which we query the offset
 274 * @fence: optional fence to return
 275 *
 276 * Close up a stream for HW test or if userspace failed to do so
 277 */
 278static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 279					struct amdgpu_bo *bo,
 280					struct dma_fence **fence)
 281{
 282	const unsigned ib_size_dw = 16;
 283	struct amdgpu_job *job;
 284	struct amdgpu_ib *ib;
 285	struct dma_fence *f = NULL;
 286	uint64_t addr;
 287	int i, r;
 288
 289	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 290					AMDGPU_IB_POOL_DIRECT, &job);
 291	if (r)
 292		return r;
 293
 294	ib = &job->ibs[0];
 295	addr = amdgpu_bo_gpu_offset(bo);
 296
 297	ib->length_dw = 0;
 298	ib->ptr[ib->length_dw++] = 0x00000018;
 299	ib->ptr[ib->length_dw++] = 0x00000001;
 300	ib->ptr[ib->length_dw++] = handle;
 301	ib->ptr[ib->length_dw++] = 0x00000000;
 302	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 303	ib->ptr[ib->length_dw++] = addr;
 304
 305	ib->ptr[ib->length_dw++] = 0x00000014;
 306	ib->ptr[ib->length_dw++] = 0x00000002;
 307	ib->ptr[ib->length_dw++] = 0x0000001c;
 308	ib->ptr[ib->length_dw++] = 0x00000000;
 309	ib->ptr[ib->length_dw++] = 0x00000000;
 310
 311	ib->ptr[ib->length_dw++] = 0x00000008;
 312	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 313
 314	for (i = ib->length_dw; i < ib_size_dw; ++i)
 315		ib->ptr[i] = 0x0;
 316
 317	r = amdgpu_job_submit_direct(job, ring, &f);
 318	if (r)
 319		goto err;
 320
 321	if (fence)
 322		*fence = dma_fence_get(f);
 323	dma_fence_put(f);
 324	return 0;
 325
 326err:
 327	amdgpu_job_free(job);
 328	return r;
 329}
 330
 331/**
 332 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 333 *
 334 * @ring: the engine to test on
 335 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 336 *
 337 */
 338static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 339{
 340	struct dma_fence *fence = NULL;
 341	struct amdgpu_bo *bo = NULL;
 342	long r;
 343
 344	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 345				      AMDGPU_GEM_DOMAIN_VRAM,
 346				      &bo, NULL, NULL);
 347	if (r)
 348		return r;
 349
 350	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
 351	if (r)
 352		goto error;
 353
 354	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 355	if (r)
 356		goto error;
 357
 358	r = dma_fence_wait_timeout(fence, false, timeout);
 359	if (r == 0)
 360		r = -ETIMEDOUT;
 361	else if (r > 0)
 362		r = 0;
 363
 364error:
 365	dma_fence_put(fence);
 366	amdgpu_bo_unpin(bo);
 367	amdgpu_bo_unreserve(bo);
 368	amdgpu_bo_unref(&bo);
 369	return r;
 370}
 371
 372static int uvd_v7_0_early_init(void *handle)
 373{
 374	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 375
 376	if (adev->asic_type == CHIP_VEGA20) {
 377		u32 harvest;
 378		int i;
 379
 380		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
 381		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
 382			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
 383			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
 384				adev->uvd.harvest_config |= 1 << i;
 385			}
 386		}
 387		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
 388						 AMDGPU_UVD_HARVEST_UVD1))
 389			/* both instances are harvested, disable the block */
 390			return -ENOENT;
 391	} else {
 392		adev->uvd.num_uvd_inst = 1;
 393	}
 394
 395	if (amdgpu_sriov_vf(adev))
 396		adev->uvd.num_enc_rings = 1;
 397	else
 398		adev->uvd.num_enc_rings = 2;
 399	uvd_v7_0_set_ring_funcs(adev);
 400	uvd_v7_0_set_enc_ring_funcs(adev);
 401	uvd_v7_0_set_irq_funcs(adev);
 402
 403	return 0;
 404}
 405
 406static int uvd_v7_0_sw_init(void *handle)
 407{
 408	struct amdgpu_ring *ring;
 409
 410	int i, j, r;
 411	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 412
 413	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 414		if (adev->uvd.harvest_config & (1 << j))
 415			continue;
 416		/* UVD TRAP */
 417		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 418		if (r)
 419			return r;
 420
 421		/* UVD ENC TRAP */
 422		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 423			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
 424			if (r)
 425				return r;
 426		}
 427	}
 428
 429	r = amdgpu_uvd_sw_init(adev);
 430	if (r)
 431		return r;
 432
 433	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 434		const struct common_firmware_header *hdr;
 435		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 436		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
 437		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 438		adev->firmware.fw_size +=
 439			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 440
 441		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
 442			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
 443			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
 444			adev->firmware.fw_size +=
 445				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 446		}
 447		DRM_INFO("PSP loading UVD firmware\n");
 448	}
 449
 450	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 451		if (adev->uvd.harvest_config & (1 << j))
 452			continue;
 453		if (!amdgpu_sriov_vf(adev)) {
 454			ring = &adev->uvd.inst[j].ring;
 455			sprintf(ring->name, "uvd_%d", ring->me);
 456			r = amdgpu_ring_init(adev, ring, 512,
 457					     &adev->uvd.inst[j].irq, 0,
 458					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 459			if (r)
 460				return r;
 461		}
 462
 463		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 464			ring = &adev->uvd.inst[j].ring_enc[i];
 465			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 466			if (amdgpu_sriov_vf(adev)) {
 467				ring->use_doorbell = true;
 468
 469				/* currently only use the first enconding ring for
 470				 * sriov, so set unused location for other unused rings.
 471				 */
 472				if (i == 0)
 473					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 474				else
 475					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 476			}
 477			r = amdgpu_ring_init(adev, ring, 512,
 478					     &adev->uvd.inst[j].irq, 0,
 479					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 480			if (r)
 481				return r;
 482		}
 483	}
 484
 485	r = amdgpu_uvd_resume(adev);
 486	if (r)
 487		return r;
 488
 489	r = amdgpu_uvd_entity_init(adev);
 490	if (r)
 491		return r;
 492
 493	r = amdgpu_virt_alloc_mm_table(adev);
 494	if (r)
 495		return r;
 496
 497	return r;
 498}
 499
 500static int uvd_v7_0_sw_fini(void *handle)
 501{
 502	int i, j, r;
 503	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 504
 505	amdgpu_virt_free_mm_table(adev);
 506
 507	r = amdgpu_uvd_suspend(adev);
 508	if (r)
 509		return r;
 510
 511	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 512		if (adev->uvd.harvest_config & (1 << j))
 513			continue;
 514		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 515			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 516	}
 517	return amdgpu_uvd_sw_fini(adev);
 518}
 519
 520/**
 521 * uvd_v7_0_hw_init - start and test UVD block
 522 *
 523 * @handle: handle used to pass amdgpu_device pointer
 524 *
 525 * Initialize the hardware, boot up the VCPU and do some testing
 526 */
 527static int uvd_v7_0_hw_init(void *handle)
 528{
 529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 530	struct amdgpu_ring *ring;
 531	uint32_t tmp;
 532	int i, j, r;
 533
 534	if (amdgpu_sriov_vf(adev))
 535		r = uvd_v7_0_sriov_start(adev);
 536	else
 537		r = uvd_v7_0_start(adev);
 538	if (r)
 539		goto done;
 540
 541	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 542		if (adev->uvd.harvest_config & (1 << j))
 543			continue;
 544		ring = &adev->uvd.inst[j].ring;
 545
 546		if (!amdgpu_sriov_vf(adev)) {
 547			r = amdgpu_ring_test_helper(ring);
 548			if (r)
 549				goto done;
 550
 551			r = amdgpu_ring_alloc(ring, 10);
 552			if (r) {
 553				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
 554				goto done;
 555			}
 556
 557			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 558				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
 559			amdgpu_ring_write(ring, tmp);
 560			amdgpu_ring_write(ring, 0xFFFFF);
 561
 562			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 563				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
 564			amdgpu_ring_write(ring, tmp);
 565			amdgpu_ring_write(ring, 0xFFFFF);
 566
 567			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 568				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
 569			amdgpu_ring_write(ring, tmp);
 570			amdgpu_ring_write(ring, 0xFFFFF);
 571
 572			/* Clear timeout status bits */
 573			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 574				mmUVD_SEMA_TIMEOUT_STATUS), 0));
 575			amdgpu_ring_write(ring, 0x8);
 576
 577			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 578				mmUVD_SEMA_CNTL), 0));
 579			amdgpu_ring_write(ring, 3);
 580
 581			amdgpu_ring_commit(ring);
 582		}
 583
 584		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 585			ring = &adev->uvd.inst[j].ring_enc[i];
 586			r = amdgpu_ring_test_helper(ring);
 587			if (r)
 588				goto done;
 589		}
 590	}
 591done:
 592	if (!r)
 593		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
 594
 595	return r;
 596}
 597
 598/**
 599 * uvd_v7_0_hw_fini - stop the hardware block
 600 *
 601 * @handle: handle used to pass amdgpu_device pointer
 602 *
 603 * Stop the UVD block, mark ring as not ready any more
 604 */
 605static int uvd_v7_0_hw_fini(void *handle)
 606{
 607	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 608
 609	if (!amdgpu_sriov_vf(adev))
 610		uvd_v7_0_stop(adev);
 611	else {
 612		/* full access mode, so don't touch any UVD register */
 613		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 614	}
 615
 616	return 0;
 617}
 618
 619static int uvd_v7_0_suspend(void *handle)
 620{
 621	int r;
 622	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 623
 624	r = uvd_v7_0_hw_fini(adev);
 625	if (r)
 626		return r;
 627
 628	return amdgpu_uvd_suspend(adev);
 629}
 630
 631static int uvd_v7_0_resume(void *handle)
 632{
 633	int r;
 634	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 635
 636	r = amdgpu_uvd_resume(adev);
 637	if (r)
 638		return r;
 639
 640	return uvd_v7_0_hw_init(adev);
 641}
 642
 643/**
 644 * uvd_v7_0_mc_resume - memory controller programming
 645 *
 646 * @adev: amdgpu_device pointer
 647 *
 648 * Let the UVD memory controller know it's offsets
 649 */
 650static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 651{
 652	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 653	uint32_t offset;
 654	int i;
 655
 656	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 657		if (adev->uvd.harvest_config & (1 << i))
 658			continue;
 659		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 660			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 661				i == 0 ?
 662				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
 663				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
 664			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 665				i == 0 ?
 666				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
 667				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
 668			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 669			offset = 0;
 670		} else {
 671			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 672				lower_32_bits(adev->uvd.inst[i].gpu_addr));
 673			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 674				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 675			offset = size;
 676			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
 677					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 678		}
 679
 680		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
 681
 682		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 683				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 684		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 685				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 686		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 687		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 688
 689		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 690				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 691		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 692				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 693		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 694		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
 695				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 696
 697		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
 698				adev->gfx.config.gb_addr_config);
 699		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
 700				adev->gfx.config.gb_addr_config);
 701		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
 702				adev->gfx.config.gb_addr_config);
 703
 704		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 705	}
 706}
 707
 708static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 709				struct amdgpu_mm_table *table)
 710{
 711	uint32_t data = 0, loop;
 712	uint64_t addr = table->gpu_addr;
 713	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 714	uint32_t size;
 715	int i;
 716
 717	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 718
 719	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
 720	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
 721	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 722
 723	/* 2, update vmid of descriptor */
 724	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 725	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 726	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
 727	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 728
 729	/* 3, notify mmsch about the size of this descriptor */
 730	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 731
 732	/* 4, set resp to zero */
 733	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 734
 735	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 736		if (adev->uvd.harvest_config & (1 << i))
 737			continue;
 738		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 739		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 740		adev->uvd.inst[i].ring_enc[0].wptr = 0;
 741		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
 742	}
 743	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 744	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 745
 746	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 747	loop = 1000;
 748	while ((data & 0x10000002) != 0x10000002) {
 749		udelay(10);
 750		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 751		loop--;
 752		if (!loop)
 753			break;
 754	}
 755
 756	if (!loop) {
 757		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 758		return -EBUSY;
 759	}
 760
 761	return 0;
 762}
 763
 764static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 765{
 766	struct amdgpu_ring *ring;
 767	uint32_t offset, size, tmp;
 768	uint32_t table_size = 0;
 769	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
 770	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
 771	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
 772	struct mmsch_v1_0_cmd_end end = { {0} };
 773	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 774	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
 775	uint8_t i = 0;
 776
 777	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 778	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
 779	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
 780	end.cmd_header.command_type = MMSCH_COMMAND__END;
 781
 782	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
 783		header->version = MMSCH_VERSION;
 784		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
 785
 786		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
 787			header->uvd_table_offset = header->header_size;
 788		else
 789			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
 790
 791		init_table += header->uvd_table_offset;
 792
 793		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 794			if (adev->uvd.harvest_config & (1 << i))
 795				continue;
 796			ring = &adev->uvd.inst[i].ring;
 797			ring->wptr = 0;
 798			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 799
 800			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 801							   0xFFFFFFFF, 0x00000004);
 802			/* mc resume*/
 803			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 804				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 805							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 806							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
 807				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 808							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 809							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
 810				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 811				offset = 0;
 812			} else {
 813				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 814							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
 815				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 816							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
 817				offset = size;
 818				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 819							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 820
 821			}
 822
 823			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 824
 825			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 826						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 827			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 828						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 829			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 830			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 831
 832			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 833						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 834			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 835						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 836			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 837			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
 838						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 839
 840			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
 841			/* mc resume end*/
 842
 843			/* disable clock gating */
 844			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
 845							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
 846
 847			/* disable interupt */
 848			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 849							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 850
 851			/* stall UMC and register bus before resetting VCPU */
 852			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 853							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 854							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 855
 856			/* put LMI, VCPU, RBC etc... into reset */
 857			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 858						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 859							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 860							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 861							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 862							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 863							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 864							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 865							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
 866
 867			/* initialize UVD memory controller */
 868			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
 869						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 870							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 871							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 872							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 873							       UVD_LMI_CTRL__REQ_MODE_MASK |
 874							       0x00100000L));
 875
 876			/* take all subblocks out of reset, except VCPU */
 877			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 878						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 879
 880			/* enable VCPU clock */
 881			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
 882						    UVD_VCPU_CNTL__CLK_EN_MASK);
 883
 884			/* enable master interrupt */
 885			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 886							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 887							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 888
 889			/* clear the bit 4 of UVD_STATUS */
 890			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 891							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
 892
 893			/* force RBC into idle state */
 894			size = order_base_2(ring->ring_size);
 895			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
 896			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 897			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
 898
 899			ring = &adev->uvd.inst[i].ring_enc[0];
 900			ring->wptr = 0;
 901			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
 902			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
 903			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
 904
 905			/* boot up the VCPU */
 906			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
 907
 908			/* enable UMC */
 909			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 910											   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
 911
 912			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
 913		}
 914		/* add end packet */
 915		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 916		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 917		header->uvd_table_size = table_size;
 918
 919	}
 920	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
 921}
 922
 923/**
 924 * uvd_v7_0_start - start UVD block
 925 *
 926 * @adev: amdgpu_device pointer
 927 *
 928 * Setup and start the UVD block
 929 */
 930static int uvd_v7_0_start(struct amdgpu_device *adev)
 931{
 932	struct amdgpu_ring *ring;
 933	uint32_t rb_bufsz, tmp;
 934	uint32_t lmi_swap_cntl;
 935	uint32_t mp_swap_cntl;
 936	int i, j, k, r;
 937
 938	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 939		if (adev->uvd.harvest_config & (1 << k))
 940			continue;
 941		/* disable DPG */
 942		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 943				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 944	}
 945
 946	/* disable byte swapping */
 947	lmi_swap_cntl = 0;
 948	mp_swap_cntl = 0;
 949
 950	uvd_v7_0_mc_resume(adev);
 951
 952	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 953		if (adev->uvd.harvest_config & (1 << k))
 954			continue;
 955		ring = &adev->uvd.inst[k].ring;
 956		/* disable clock gating */
 957		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
 958				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 959
 960		/* disable interupt */
 961		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
 962				~UVD_MASTINT_EN__VCPU_EN_MASK);
 963
 964		/* stall UMC and register bus before resetting VCPU */
 965		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
 966				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 967				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 968		mdelay(1);
 969
 970		/* put LMI, VCPU, RBC etc... into reset */
 971		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
 972			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 973			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 974			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 975			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 976			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 977			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 978			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 979			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
 980		mdelay(5);
 981
 982		/* initialize UVD memory controller */
 983		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
 984			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 985			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 986			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 987			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 988			UVD_LMI_CTRL__REQ_MODE_MASK |
 989			0x00100000L);
 990
 991#ifdef __BIG_ENDIAN
 992		/* swap (8 in 32) RB and IB */
 993		lmi_swap_cntl = 0xa;
 994		mp_swap_cntl = 0;
 995#endif
 996		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 997		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 998
 999		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1000		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1001		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1002		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1003		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1004		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1005
1006		/* take all subblocks out of reset, except VCPU */
1007		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1008				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1009		mdelay(5);
1010
1011		/* enable VCPU clock */
1012		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1013				UVD_VCPU_CNTL__CLK_EN_MASK);
1014
1015		/* enable UMC */
1016		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1017				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1018
1019		/* boot up the VCPU */
1020		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1021		mdelay(10);
1022
1023		for (i = 0; i < 10; ++i) {
1024			uint32_t status;
1025
1026			for (j = 0; j < 100; ++j) {
1027				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1028				if (status & 2)
1029					break;
1030				mdelay(10);
1031			}
1032			r = 0;
1033			if (status & 2)
1034				break;
1035
1036			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1037			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1038					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1039					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1040			mdelay(10);
1041			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1042					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1043			mdelay(10);
1044			r = -1;
1045		}
1046
1047		if (r) {
1048			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1049			return r;
1050		}
1051		/* enable master interrupt */
1052		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1053			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1054			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1055
1056		/* clear the bit 4 of UVD_STATUS */
1057		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1058				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1059
1060		/* force RBC into idle state */
1061		rb_bufsz = order_base_2(ring->ring_size);
1062		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1063		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1064		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1065		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1066		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1067		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1068		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1069
1070		/* set the write pointer delay */
1071		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1072
1073		/* set the wb address */
1074		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1075				(upper_32_bits(ring->gpu_addr) >> 2));
1076
1077		/* program the RB_BASE for ring buffer */
1078		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1079				lower_32_bits(ring->gpu_addr));
1080		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1081				upper_32_bits(ring->gpu_addr));
1082
1083		/* Initialize the ring buffer's read and write pointers */
1084		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1085
1086		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1087		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1088				lower_32_bits(ring->wptr));
1089
1090		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1091				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1092
1093		ring = &adev->uvd.inst[k].ring_enc[0];
1094		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1095		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1096		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1097		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1098		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1099
1100		ring = &adev->uvd.inst[k].ring_enc[1];
1101		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1102		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1103		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1104		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1105		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1106	}
1107	return 0;
1108}
1109
1110/**
1111 * uvd_v7_0_stop - stop UVD block
1112 *
1113 * @adev: amdgpu_device pointer
1114 *
1115 * stop the UVD block
1116 */
1117static void uvd_v7_0_stop(struct amdgpu_device *adev)
1118{
1119	uint8_t i = 0;
1120
1121	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1122		if (adev->uvd.harvest_config & (1 << i))
1123			continue;
1124		/* force RBC into idle state */
1125		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1126
1127		/* Stall UMC and register bus before resetting VCPU */
1128		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1129				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1130				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1131		mdelay(1);
1132
1133		/* put VCPU into reset */
1134		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1135				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1136		mdelay(5);
1137
1138		/* disable VCPU clock */
1139		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1140
1141		/* Unstall UMC and register bus */
1142		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1143				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1144	}
1145}
1146
1147/**
1148 * uvd_v7_0_ring_emit_fence - emit an fence & trap command
1149 *
1150 * @ring: amdgpu_ring pointer
1151 * @addr: address
1152 * @seq: sequence number
1153 * @flags: fence related flags
1154 *
1155 * Write a fence and a trap command to the ring.
1156 */
1157static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1158				     unsigned flags)
1159{
1160	struct amdgpu_device *adev = ring->adev;
1161
1162	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1163
1164	amdgpu_ring_write(ring,
1165		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1166	amdgpu_ring_write(ring, seq);
1167	amdgpu_ring_write(ring,
1168		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1169	amdgpu_ring_write(ring, addr & 0xffffffff);
1170	amdgpu_ring_write(ring,
1171		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1172	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1173	amdgpu_ring_write(ring,
1174		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1175	amdgpu_ring_write(ring, 0);
1176
1177	amdgpu_ring_write(ring,
1178		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1179	amdgpu_ring_write(ring, 0);
1180	amdgpu_ring_write(ring,
1181		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1182	amdgpu_ring_write(ring, 0);
1183	amdgpu_ring_write(ring,
1184		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1185	amdgpu_ring_write(ring, 2);
1186}
1187
1188/**
1189 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1190 *
1191 * @ring: amdgpu_ring pointer
1192 * @addr: address
1193 * @seq: sequence number
1194 * @flags: fence related flags
1195 *
1196 * Write enc a fence and a trap command to the ring.
1197 */
1198static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1199			u64 seq, unsigned flags)
1200{
1201
1202	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1203
1204	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1205	amdgpu_ring_write(ring, addr);
1206	amdgpu_ring_write(ring, upper_32_bits(addr));
1207	amdgpu_ring_write(ring, seq);
1208	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1209}
1210
1211/**
1212 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1213 *
1214 * @ring: amdgpu_ring pointer
1215 */
1216static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1217{
1218	/* The firmware doesn't seem to like touching registers at this point. */
1219}
1220
1221/**
1222 * uvd_v7_0_ring_test_ring - register write test
1223 *
1224 * @ring: amdgpu_ring pointer
1225 *
1226 * Test if we can successfully write to the context register
1227 */
1228static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1229{
1230	struct amdgpu_device *adev = ring->adev;
1231	uint32_t tmp = 0;
1232	unsigned i;
1233	int r;
1234
1235	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1236	r = amdgpu_ring_alloc(ring, 3);
1237	if (r)
1238		return r;
1239
1240	amdgpu_ring_write(ring,
1241		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1242	amdgpu_ring_write(ring, 0xDEADBEEF);
1243	amdgpu_ring_commit(ring);
1244	for (i = 0; i < adev->usec_timeout; i++) {
1245		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1246		if (tmp == 0xDEADBEEF)
1247			break;
1248		udelay(1);
1249	}
1250
1251	if (i >= adev->usec_timeout)
1252		r = -ETIMEDOUT;
1253
1254	return r;
1255}
1256
1257/**
1258 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1259 *
1260 * @p: the CS parser with the IBs
1261 * @ib_idx: which IB to patch
1262 *
1263 */
1264static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1265					   uint32_t ib_idx)
1266{
1267	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1268	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1269	unsigned i;
1270
1271	/* No patching necessary for the first instance */
1272	if (!ring->me)
1273		return 0;
1274
1275	for (i = 0; i < ib->length_dw; i += 2) {
1276		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1277
1278		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1279		reg += p->adev->reg_offset[UVD_HWIP][1][1];
1280
1281		amdgpu_set_ib_value(p, ib_idx, i, reg);
1282	}
1283	return 0;
1284}
1285
1286/**
1287 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1288 *
1289 * @ring: amdgpu_ring pointer
1290 * @job: job to retrieve vmid from
1291 * @ib: indirect buffer to execute
1292 * @flags: unused
1293 *
1294 * Write ring commands to execute the indirect buffer
1295 */
1296static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1297				  struct amdgpu_job *job,
1298				  struct amdgpu_ib *ib,
1299				  uint32_t flags)
1300{
1301	struct amdgpu_device *adev = ring->adev;
1302	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1303
1304	amdgpu_ring_write(ring,
1305		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1306	amdgpu_ring_write(ring, vmid);
1307
1308	amdgpu_ring_write(ring,
1309		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1310	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1311	amdgpu_ring_write(ring,
1312		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1313	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1314	amdgpu_ring_write(ring,
1315		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1316	amdgpu_ring_write(ring, ib->length_dw);
1317}
1318
1319/**
1320 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1321 *
1322 * @ring: amdgpu_ring pointer
1323 * @job: job to retrive vmid from
1324 * @ib: indirect buffer to execute
1325 * @flags: unused
1326 *
1327 * Write enc ring commands to execute the indirect buffer
1328 */
1329static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1330					struct amdgpu_job *job,
1331					struct amdgpu_ib *ib,
1332					uint32_t flags)
1333{
1334	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1335
1336	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1337	amdgpu_ring_write(ring, vmid);
1338	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1339	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1340	amdgpu_ring_write(ring, ib->length_dw);
1341}
1342
1343static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1344				    uint32_t reg, uint32_t val)
1345{
1346	struct amdgpu_device *adev = ring->adev;
1347
1348	amdgpu_ring_write(ring,
1349		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1350	amdgpu_ring_write(ring, reg << 2);
1351	amdgpu_ring_write(ring,
1352		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1353	amdgpu_ring_write(ring, val);
1354	amdgpu_ring_write(ring,
1355		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1356	amdgpu_ring_write(ring, 8);
1357}
1358
1359static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1360					uint32_t val, uint32_t mask)
1361{
1362	struct amdgpu_device *adev = ring->adev;
1363
1364	amdgpu_ring_write(ring,
1365		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1366	amdgpu_ring_write(ring, reg << 2);
1367	amdgpu_ring_write(ring,
1368		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1369	amdgpu_ring_write(ring, val);
1370	amdgpu_ring_write(ring,
1371		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1372	amdgpu_ring_write(ring, mask);
1373	amdgpu_ring_write(ring,
1374		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1375	amdgpu_ring_write(ring, 12);
1376}
1377
1378static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1379					unsigned vmid, uint64_t pd_addr)
1380{
1381	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1382	uint32_t data0, data1, mask;
1383
1384	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1385
1386	/* wait for reg writes */
1387	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1388	data1 = lower_32_bits(pd_addr);
1389	mask = 0xffffffff;
1390	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1391}
1392
1393static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1394{
1395	struct amdgpu_device *adev = ring->adev;
1396	int i;
1397
1398	WARN_ON(ring->wptr % 2 || count % 2);
1399
1400	for (i = 0; i < count / 2; i++) {
1401		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1402		amdgpu_ring_write(ring, 0);
1403	}
1404}
1405
1406static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1407{
1408	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1409}
1410
1411static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1412					    uint32_t reg, uint32_t val,
1413					    uint32_t mask)
1414{
1415	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1416	amdgpu_ring_write(ring,	reg << 2);
1417	amdgpu_ring_write(ring, mask);
1418	amdgpu_ring_write(ring, val);
1419}
1420
1421static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1422					    unsigned int vmid, uint64_t pd_addr)
1423{
1424	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1425
1426	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1427
1428	/* wait for reg writes */
1429	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1430					vmid * hub->ctx_addr_distance,
1431					lower_32_bits(pd_addr), 0xffffffff);
1432}
1433
1434static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1435					uint32_t reg, uint32_t val)
1436{
1437	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1438	amdgpu_ring_write(ring,	reg << 2);
1439	amdgpu_ring_write(ring, val);
1440}
1441
1442#if 0
1443static bool uvd_v7_0_is_idle(void *handle)
1444{
1445	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1446
1447	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1448}
1449
1450static int uvd_v7_0_wait_for_idle(void *handle)
1451{
1452	unsigned i;
1453	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1454
1455	for (i = 0; i < adev->usec_timeout; i++) {
1456		if (uvd_v7_0_is_idle(handle))
1457			return 0;
1458	}
1459	return -ETIMEDOUT;
1460}
1461
1462#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1463static bool uvd_v7_0_check_soft_reset(void *handle)
1464{
1465	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1466	u32 srbm_soft_reset = 0;
1467	u32 tmp = RREG32(mmSRBM_STATUS);
1468
1469	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1470	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1471	    (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
1472		    AMDGPU_UVD_STATUS_BUSY_MASK))
1473		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1474				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1475
1476	if (srbm_soft_reset) {
1477		adev->uvd.inst[0].srbm_soft_reset = srbm_soft_reset;
1478		return true;
1479	} else {
1480		adev->uvd.inst[0].srbm_soft_reset = 0;
1481		return false;
1482	}
1483}
1484
1485static int uvd_v7_0_pre_soft_reset(void *handle)
1486{
1487	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488
1489	if (!adev->uvd.inst[0].srbm_soft_reset)
1490		return 0;
1491
1492	uvd_v7_0_stop(adev);
1493	return 0;
1494}
1495
1496static int uvd_v7_0_soft_reset(void *handle)
1497{
1498	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1499	u32 srbm_soft_reset;
1500
1501	if (!adev->uvd.inst[0].srbm_soft_reset)
1502		return 0;
1503	srbm_soft_reset = adev->uvd.inst[0].srbm_soft_reset;
1504
1505	if (srbm_soft_reset) {
1506		u32 tmp;
1507
1508		tmp = RREG32(mmSRBM_SOFT_RESET);
1509		tmp |= srbm_soft_reset;
1510		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1511		WREG32(mmSRBM_SOFT_RESET, tmp);
1512		tmp = RREG32(mmSRBM_SOFT_RESET);
1513
1514		udelay(50);
1515
1516		tmp &= ~srbm_soft_reset;
1517		WREG32(mmSRBM_SOFT_RESET, tmp);
1518		tmp = RREG32(mmSRBM_SOFT_RESET);
1519
1520		/* Wait a little for things to settle down */
1521		udelay(50);
1522	}
1523
1524	return 0;
1525}
1526
1527static int uvd_v7_0_post_soft_reset(void *handle)
1528{
1529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1530
1531	if (!adev->uvd.inst[0].srbm_soft_reset)
1532		return 0;
1533
1534	mdelay(5);
1535
1536	return uvd_v7_0_start(adev);
1537}
1538#endif
1539
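/**
 * uvd_v7_0_set_interrupt_state - set the UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a stub: the request is accepted but nothing is programmed.
 */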
1540static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1541					struct amdgpu_irq_src *source,
1542					unsigned type,
1543					enum amdgpu_interrupt_state state)
1544{
1545	/* TODO: not implemented */
1546	return 0;
1547}
1548
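/**
 * uvd_v7_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Map the client id to the UVD instance that raised the interrupt and
 * signal the fence of the corresponding ring.
 */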
1549static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1550				      struct amdgpu_irq_src *source,
1551				      struct amdgpu_iv_entry *entry)
1552{
1553	uint32_t ip_instance;
1554
1555	switch (entry->client_id) {
1556	case SOC15_IH_CLIENTID_UVD:
1557		ip_instance = 0;
1558		break;
1559	case SOC15_IH_CLIENTID_UVD1:
1560		ip_instance = 1;
1561		break;
1562	default:
1563		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1564		return 0;
1565	}
1566
1567	DRM_DEBUG("IH: UVD TRAP\n");
1568
1569	switch (entry->src_id) {
1570	case 124:	/* decode ring */
1571		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1572		break;
1573	case 119:	/* encode ring 0 */
1574		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1575		break;
1576	case 120:	/* encode ring 1, unused under SR-IOV */
1577		if (!amdgpu_sriov_vf(adev))
1578			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1579		break;
1580	default:
1581		DRM_ERROR("Unhandled interrupt: %d %d\n",
1582			  entry->src_id, entry->src_data[0]);
1583		break;
1584	}
1585
1586	return 0;
1587}
1588
1589#if 0
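/*
 * Disabled clock/power gating reference code. As above, "ring" was
 * not in scope here; instance 0 is assumed throughout.
 */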
1590static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1591{
1592	uint32_t data, data1, data2, suvd_flags;
1593
1594	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
1595	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1596	data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
1597
1598	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1599		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1600
1601	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1602		     UVD_SUVD_CGC_GATE__SIT_MASK |
1603		     UVD_SUVD_CGC_GATE__SMP_MASK |
1604		     UVD_SUVD_CGC_GATE__SCM_MASK |
1605		     UVD_SUVD_CGC_GATE__SDB_MASK;
1606
1607	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1608		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1609		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1610
1611	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1612			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1613			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1614			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1615			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1616			UVD_CGC_CTRL__SYS_MODE_MASK |
1617			UVD_CGC_CTRL__UDEC_MODE_MASK |
1618			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1619			UVD_CGC_CTRL__REGS_MODE_MASK |
1620			UVD_CGC_CTRL__RBC_MODE_MASK |
1621			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1622			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1623			UVD_CGC_CTRL__IDCT_MODE_MASK |
1624			UVD_CGC_CTRL__MPRD_MODE_MASK |
1625			UVD_CGC_CTRL__MPC_MODE_MASK |
1626			UVD_CGC_CTRL__LBSI_MODE_MASK |
1627			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1628			UVD_CGC_CTRL__WCB_MODE_MASK |
1629			UVD_CGC_CTRL__VCPU_MODE_MASK |
1630			UVD_CGC_CTRL__JPEG_MODE_MASK |
1631			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1632			UVD_CGC_CTRL__SCPU_MODE_MASK);
1633	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1634			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1635			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1636			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1637			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1638	data1 |= suvd_flags;
1639
1640	WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
1641	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
1642	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1643	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
1644}
1645
1646static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1647{
1648	uint32_t data, data1, cgc_flags, suvd_flags;
1649
1650	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
1651	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1652
1653	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1654		UVD_CGC_GATE__UDEC_MASK |
1655		UVD_CGC_GATE__MPEG2_MASK |
1656		UVD_CGC_GATE__RBC_MASK |
1657		UVD_CGC_GATE__LMI_MC_MASK |
1658		UVD_CGC_GATE__IDCT_MASK |
1659		UVD_CGC_GATE__MPRD_MASK |
1660		UVD_CGC_GATE__MPC_MASK |
1661		UVD_CGC_GATE__LBSI_MASK |
1662		UVD_CGC_GATE__LRBBM_MASK |
1663		UVD_CGC_GATE__UDEC_RE_MASK |
1664		UVD_CGC_GATE__UDEC_CM_MASK |
1665		UVD_CGC_GATE__UDEC_IT_MASK |
1666		UVD_CGC_GATE__UDEC_DB_MASK |
1667		UVD_CGC_GATE__UDEC_MP_MASK |
1668		UVD_CGC_GATE__WCB_MASK |
1669		UVD_CGC_GATE__VCPU_MASK |
1670		UVD_CGC_GATE__SCPU_MASK |
1671		UVD_CGC_GATE__JPEG_MASK |
1672		UVD_CGC_GATE__JPEG2_MASK;
1673
1674	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1675				UVD_SUVD_CGC_GATE__SIT_MASK |
1676				UVD_SUVD_CGC_GATE__SMP_MASK |
1677				UVD_SUVD_CGC_GATE__SCM_MASK |
1678				UVD_SUVD_CGC_GATE__SDB_MASK;
1679
1680	data |= cgc_flags;
1681	data1 |= suvd_flags;
1682
1683	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1684	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1685}
1686
1687static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1688{
1689	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1690
1691	if (enable)
1692		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1693			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1694	else
1695		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1696			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1697
1698	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1699}
1700
1701
1702static int uvd_v7_0_set_clockgating_state(void *handle,
1703					  enum amd_clockgating_state state)
1704{
1705	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1706	bool enable = (state == AMD_CG_STATE_GATE);
1707
1708	uvd_v7_0_set_bypass_mode(adev, enable);
1709
1710	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1711		return 0;
1712
1713	if (enable) {
1714		/* disable HW gating and enable SW gating */
1715		uvd_v7_0_set_sw_clock_gating(adev);
1716	} else {
1717		/* wait for STATUS to clear */
1718		if (uvd_v7_0_wait_for_idle(handle))
1719			return -EBUSY;
1720
1721		/* enable HW gates because UVD is idle */
1722		/* uvd_v7_0_set_hw_clock_gating(adev); */
1723	}
1724
1725	return 0;
1726}
1727
1728static int uvd_v7_0_set_powergating_state(void *handle,
1729					  enum amd_powergating_state state)
1730{
1731	/* This doesn't actually powergate the UVD block.
1732	 * That's done in the dpm code via the SMC.  This
1733	 * just re-inits the block as necessary.  The actual
1734	 * gating still happens in the dpm code.  We should
1735	 * revisit this when there is a cleaner line between
1736	 * the smc and the hw blocks
1737	 */
1738	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1739
1740	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1741		return 0;
1742
1743	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1744
1745	if (state == AMD_PG_STATE_GATE) {
1746		uvd_v7_0_stop(adev);
1747		return 0;
1748	} else {
1749		return uvd_v7_0_start(adev);
1750	}
1751}
1752#endif
1753
1754static int uvd_v7_0_set_clockgating_state(void *handle,
1755					  enum amd_clockgating_state state)
1756{
1757	/* needed for driver unload */
1758	return 0;
1759}
1760
1761const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1762	.name = "uvd_v7_0",
1763	.early_init = uvd_v7_0_early_init,
1764	.late_init = NULL,
1765	.sw_init = uvd_v7_0_sw_init,
1766	.sw_fini = uvd_v7_0_sw_fini,
1767	.hw_init = uvd_v7_0_hw_init,
1768	.hw_fini = uvd_v7_0_hw_fini,
1769	.suspend = uvd_v7_0_suspend,
1770	.resume = uvd_v7_0_resume,
1771	.is_idle = NULL /* uvd_v7_0_is_idle */,
1772	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1773	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1774	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1775	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1776	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1777	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1778	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1779};
1780
1781static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1782	.type = AMDGPU_RING_TYPE_UVD,
1783	.align_mask = 0xf,
1784	.support_64bit_ptrs = false,
1785	.no_user_fence = true,
1786	.vmhub = AMDGPU_MMHUB_0,
1787	.get_rptr = uvd_v7_0_ring_get_rptr,
1788	.get_wptr = uvd_v7_0_ring_get_wptr,
1789	.set_wptr = uvd_v7_0_ring_set_wptr,
1790	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1791	.emit_frame_size =
1792		6 + /* hdp invalidate */
1793		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1794		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1795		8 + /* uvd_v7_0_ring_emit_vm_flush */
1796		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1797	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1798	.emit_ib = uvd_v7_0_ring_emit_ib,
1799	.emit_fence = uvd_v7_0_ring_emit_fence,
1800	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1801	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1802	.test_ring = uvd_v7_0_ring_test_ring,
1803	.test_ib = amdgpu_uvd_ring_test_ib,
1804	.insert_nop = uvd_v7_0_ring_insert_nop,
1805	.pad_ib = amdgpu_ring_generic_pad_ib,
1806	.begin_use = amdgpu_uvd_ring_begin_use,
1807	.end_use = amdgpu_uvd_ring_end_use,
1808	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1809	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1810	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1811};
1812
1813static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1814	.type = AMDGPU_RING_TYPE_UVD_ENC,
1815	.align_mask = 0x3f,
1816	.nop = HEVC_ENC_CMD_NO_OP,
1817	.support_64bit_ptrs = false,
1818	.no_user_fence = true,
1819	.vmhub = AMDGPU_MMHUB_0,
1820	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1821	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1822	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1823	.emit_frame_size =
1824		3 + 3 + /* hdp flush / invalidate */
1825		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1826		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1827		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1828		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1829		1, /* uvd_v7_0_enc_ring_insert_end */
1830	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1831	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1832	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1833	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1834	.test_ring = uvd_v7_0_enc_ring_test_ring,
1835	.test_ib = uvd_v7_0_enc_ring_test_ib,
1836	.insert_nop = amdgpu_ring_insert_nop,
1837	.insert_end = uvd_v7_0_enc_ring_insert_end,
1838	.pad_ib = amdgpu_ring_generic_pad_ib,
1839	.begin_use = amdgpu_uvd_ring_begin_use,
1840	.end_use = amdgpu_uvd_ring_end_use,
1841	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1842	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1843	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1844};
1845
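/**
 * uvd_v7_0_set_ring_funcs - set the decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Attach the decode ring function table to every non-harvested
 * UVD instance.
 */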
1846static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1847{
1848	int i;
1849
1850	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1851		if (adev->uvd.harvest_config & (1 << i))
1852			continue;
1853		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1854		adev->uvd.inst[i].ring.me = i;
1855		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1856	}
1857}
1858
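/**
 * uvd_v7_0_set_enc_ring_funcs - set the encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Attach the encode ring function table to every encode ring of every
 * non-harvested UVD instance.
 */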
1859static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1860{
1861	int i, j;
1862
1863	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1864		if (adev->uvd.harvest_config & (1 << j))
1865			continue;
1866		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1867			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1868			adev->uvd.inst[j].ring_enc[i].me = j;
1869		}
1870
1871		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1872	}
1873}
1874
1875static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1876	.set = uvd_v7_0_set_interrupt_state,
1877	.process = uvd_v7_0_process_interrupt,
1878};
1879
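/**
 * uvd_v7_0_set_irq_funcs - set the interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Register one interrupt type per encode ring plus one for the decode
 * ring on every non-harvested UVD instance.
 */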
1880static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1881{
1882	int i;
1883
1884	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1885		if (adev->uvd.harvest_config & (1 << i))
1886			continue;
1887		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1888		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1889	}
1890}
1891
1892	const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1893	{
1894		.type = AMD_IP_BLOCK_TYPE_UVD,
1895		.major = 7,
1896		.minor = 0,
1897		.rev = 0,
1898		.funcs = &uvd_v7_0_ip_funcs,
1899	};