/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2
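/* Vega20 carries two UVD instances, either of which may be fused off
 * (harvested); see the check in uvd_v7_0_early_init().
 */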

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

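	/*
	 * Each command packet below starts with its size in bytes followed
	 * by its type: 0x00000018 is a 24-byte (6 dword) "session info"
	 * packet, 0x00000014 a 20-byte "task info" packet and 0x00000008
	 * an 8-byte opcode-only packet.
	 */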
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
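		/*
		 * With PSP-based loading the PSP places the UVD ucode in the
		 * TMR region, so register one ucode entry per UVD instance;
		 * the TMR addresses are then programmed in
		 * uvd_v7_0_mc_resume().
		 */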
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring
				 * for SR-IOV, so set an unused doorbell
				 * location for the other rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

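		/*
		 * The VCPU sees three cache windows: window 0 maps the
		 * firmware image, window 1 the heap and window 2 the
		 * stack/session data, each sized below.
		 */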
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
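	/*
	 * Kick-off is signalled with 0x10000001; the MMSCH reports
	 * completion by setting 0x10000002 in the RESP register, which
	 * the loop below polls for up to roughly 10ms (1000 * 10us).
	 */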

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);

			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

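/*
 * The GPCOM_VCPU_CMD values written by the helpers below appear to follow
 * the usual UVD firmware convention: 0 latches DATA0/DATA1 as a fence
 * write, 2 emits a trap, 8 performs a register write and 12 a masked
 * register wait (mask taken from GP_SCRATCH8).
 */
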
/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @job: which job this ib is in
 * @ib: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

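	/*
	 * Rebase every register offset in the IB from the first UVD
	 * instance's register space to the second one's; the trailing
	 * [1] picks the register segment within each instance.
	 */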
	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_ib_set_value(ib, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

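	/* These src_ids match the IRQ sources registered in
	 * uvd_v7_0_sw_init(): 124 is
	 * UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT (decode ring),
	 * 119 and 120 are UVD_7_0__SRCID__UVD_ENC_GEN_PURP(+1) for the
	 * two ENC rings.
	 */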
1588	switch (entry->src_id) {
1589	case 124:
1590		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1591		break;
1592	case 119:
1593		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1594		break;
1595	case 120:
1596		if (!amdgpu_sriov_vf(adev))
1597			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1598		break;
1599	default:
1600		DRM_ERROR("Unhandled interrupt: %d %d\n",
1601			  entry->src_id, entry->src_data[0]);
1602		break;
1603	}
1604
1605	return 0;
1606}
1607
1608#if 0
1609static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1610{
1611	uint32_t data, data1, data2, suvd_flags;
1612
1613	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1614	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1615	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1616
1617	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1618		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1619
1620	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1621		     UVD_SUVD_CGC_GATE__SIT_MASK |
1622		     UVD_SUVD_CGC_GATE__SMP_MASK |
1623		     UVD_SUVD_CGC_GATE__SCM_MASK |
1624		     UVD_SUVD_CGC_GATE__SDB_MASK;
1625
1626	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1627		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1628		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1629
1630	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1631			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1632			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1633			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1634			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1635			UVD_CGC_CTRL__SYS_MODE_MASK |
1636			UVD_CGC_CTRL__UDEC_MODE_MASK |
1637			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1638			UVD_CGC_CTRL__REGS_MODE_MASK |
1639			UVD_CGC_CTRL__RBC_MODE_MASK |
1640			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1641			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1642			UVD_CGC_CTRL__IDCT_MODE_MASK |
1643			UVD_CGC_CTRL__MPRD_MODE_MASK |
1644			UVD_CGC_CTRL__MPC_MODE_MASK |
1645			UVD_CGC_CTRL__LBSI_MODE_MASK |
1646			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1647			UVD_CGC_CTRL__WCB_MODE_MASK |
1648			UVD_CGC_CTRL__VCPU_MODE_MASK |
1649			UVD_CGC_CTRL__JPEG_MODE_MASK |
1650			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1651			UVD_CGC_CTRL__SCPU_MODE_MASK);
1652	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1653			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1654			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1655			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1656			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1657	data1 |= suvd_flags;
1658
1659	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1660	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1661	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1662	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1663}
1664
1665static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1666{
1667	uint32_t data, data1, cgc_flags, suvd_flags;
1668
1669	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1670	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1671
1672	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1673		UVD_CGC_GATE__UDEC_MASK |
1674		UVD_CGC_GATE__MPEG2_MASK |
1675		UVD_CGC_GATE__RBC_MASK |
1676		UVD_CGC_GATE__LMI_MC_MASK |
1677		UVD_CGC_GATE__IDCT_MASK |
1678		UVD_CGC_GATE__MPRD_MASK |
1679		UVD_CGC_GATE__MPC_MASK |
1680		UVD_CGC_GATE__LBSI_MASK |
1681		UVD_CGC_GATE__LRBBM_MASK |
1682		UVD_CGC_GATE__UDEC_RE_MASK |
1683		UVD_CGC_GATE__UDEC_CM_MASK |
1684		UVD_CGC_GATE__UDEC_IT_MASK |
1685		UVD_CGC_GATE__UDEC_DB_MASK |
1686		UVD_CGC_GATE__UDEC_MP_MASK |
1687		UVD_CGC_GATE__WCB_MASK |
1688		UVD_CGC_GATE__VCPU_MASK |
1689		UVD_CGC_GATE__SCPU_MASK |
1690		UVD_CGC_GATE__JPEG_MASK |
1691		UVD_CGC_GATE__JPEG2_MASK;
1692
1693	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1694				UVD_SUVD_CGC_GATE__SIT_MASK |
1695				UVD_SUVD_CGC_GATE__SMP_MASK |
1696				UVD_SUVD_CGC_GATE__SCM_MASK |
1697				UVD_SUVD_CGC_GATE__SDB_MASK;
1698
1699	data |= cgc_flags;
1700	data1 |= suvd_flags;
1701
1702	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1703	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1704}
1705
1706static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1707{
1708	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1709
1710	if (enable)
1711		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1712			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1713	else
1714		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1715			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1716
1717	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1718}
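/*
 * GCK_DFS_BYPASS_CNTL lives in SMC register space (hence the RREG32_SMC /
 * WREG32_SMC accessors); the BYPASSDCLK/BYPASSVCLK bits appear to route the
 * UVD decode and video clocks around their DFS dividers while the block is
 * gated.  Intended use, mirroring set_clockgating_state below:
 *
 *	uvd_v7_0_set_bypass_mode(adev, true);	// gating: bypass DCLK/VCLK
 *	uvd_v7_0_set_bypass_mode(adev, false);	// ungating: normal clocks
 */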
1719
1720
1721static int uvd_v7_0_set_clockgating_state(void *handle,
1722					  enum amd_clockgating_state state)
1723{
1724	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1725	bool enable = (state == AMD_CG_STATE_GATE);
1726
1727	uvd_v7_0_set_bypass_mode(adev, enable);
1728
1729	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1730		return 0;
1731
1732	if (enable) {
1733		/* disable HW gating and enable SW gating */
1734		uvd_v7_0_set_sw_clock_gating(adev);
1735	} else {
1736		/* wait for STATUS to clear */
1737		if (uvd_v7_0_wait_for_idle(handle))
1738			return -EBUSY;
1739
1740		/* enable HW gates because UVD is idle */
1741		/* uvd_v7_0_set_hw_clock_gating(adev); */
1742	}
1743
1744	return 0;
1745}
1746
1747static int uvd_v7_0_set_powergating_state(void *handle,
1748					  enum amd_powergating_state state)
1749{
1750	/* This doesn't actually powergate the UVD block.
1751	 * That's done in the dpm code via the SMC.  This
1752	 * just re-inits the block as necessary.  The actual
1753	 * gating still happens in the dpm code.  We should
1754	 * revisit this when there is a cleaner line between
1755	 * the SMC and the HW blocks.
1756	 */
1757	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1758
1759	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1760		return 0;
1761
1762	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); /* instance 0; 'ring' not in scope */
1763
1764	if (state == AMD_PG_STATE_GATE) {
1765		uvd_v7_0_stop(adev);
1766		return 0;
1767	} else {
1768		return uvd_v7_0_start(adev);
1769	}
1770}
1771#endif
1772
1773static int uvd_v7_0_set_clockgating_state(void *handle,
1774					  enum amd_clockgating_state state)
1775{
1776	/* needed for driver unload */
1777	return 0;
1778}
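/*
 * Kept as a non-NULL stub so the common code can walk every IP block's
 * set_clockgating_state() during teardown; actual UVD 7.x gating policy is
 * handled elsewhere (see the disabled implementation above).
 */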
1779
1780const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1781	.name = "uvd_v7_0",
1782	.early_init = uvd_v7_0_early_init,
1783	.late_init = NULL,
1784	.sw_init = uvd_v7_0_sw_init,
1785	.sw_fini = uvd_v7_0_sw_fini,
1786	.hw_init = uvd_v7_0_hw_init,
1787	.hw_fini = uvd_v7_0_hw_fini,
1788	.suspend = uvd_v7_0_suspend,
1789	.resume = uvd_v7_0_resume,
1790	.is_idle = NULL /* uvd_v7_0_is_idle */,
1791	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1792	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1793	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1794	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1795	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1796	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1797	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1798};
1799
1800static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1801	.type = AMDGPU_RING_TYPE_UVD,
1802	.align_mask = 0xf,
1803	.support_64bit_ptrs = false,
1804	.no_user_fence = true,
1805	.vmhub = AMDGPU_MMHUB_0,
1806	.get_rptr = uvd_v7_0_ring_get_rptr,
1807	.get_wptr = uvd_v7_0_ring_get_wptr,
1808	.set_wptr = uvd_v7_0_ring_set_wptr,
1809	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1810	.emit_frame_size =
1811		6 + /* hdp invalidate */
1812		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1813		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1814		8 + /* uvd_v7_0_ring_emit_vm_flush */
1815		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1816	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1817	.emit_ib = uvd_v7_0_ring_emit_ib,
1818	.emit_fence = uvd_v7_0_ring_emit_fence,
1819	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1820	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1821	.test_ring = uvd_v7_0_ring_test_ring,
1822	.test_ib = amdgpu_uvd_ring_test_ib,
1823	.insert_nop = uvd_v7_0_ring_insert_nop,
1824	.pad_ib = amdgpu_ring_generic_pad_ib,
1825	.begin_use = amdgpu_uvd_ring_begin_use,
1826	.end_use = amdgpu_uvd_ring_end_use,
1827	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1828	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1829	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1830};
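/*
 * The two size fields above are worst-case dword budgets: emit_frame_size
 * covers everything emitted around the IBs of one submission (HDP
 * invalidate, VM flush, two fences), emit_ib_size a single
 * uvd_v7_0_ring_emit_ib() call.  Purely illustrative view of the budget:
 *
 *	unsigned ndw = funcs->emit_frame_size +
 *		       num_ibs * funcs->emit_ib_size;
 *	// the core reserves at least ndw dwords before emitting a frame
 */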
1831
1832static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1833	.type = AMDGPU_RING_TYPE_UVD_ENC,
1834	.align_mask = 0x3f,
1835	.nop = HEVC_ENC_CMD_NO_OP,
1836	.support_64bit_ptrs = false,
1837	.no_user_fence = true,
1838	.vmhub = AMDGPU_MMHUB_0,
1839	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1840	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1841	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1842	.emit_frame_size =
1843		3 + 3 + /* hdp flush / invalidate */
1844		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1845		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1846		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1847		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1848		1, /* uvd_v7_0_enc_ring_insert_end */
1849	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1850	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1851	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1852	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1853	.test_ring = uvd_v7_0_enc_ring_test_ring,
1854	.test_ib = uvd_v7_0_enc_ring_test_ib,
1855	.insert_nop = amdgpu_ring_insert_nop,
1856	.insert_end = uvd_v7_0_enc_ring_insert_end,
1857	.pad_ib = amdgpu_ring_generic_pad_ib,
1858	.begin_use = amdgpu_uvd_ring_begin_use,
1859	.end_use = amdgpu_uvd_ring_end_use,
1860	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1861	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1862	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1863};
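/*
 * Unlike the decode ring above (16-dword alignment, NOPs emitted as
 * UVD_NO_OP register writes), the encode ring aligns to 64 dwords and pads
 * with plain HEVC_ENC_CMD_NO_OP packets via the generic
 * amdgpu_ring_insert_nop() helper.
 */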
1864
1865static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1866{
1867	int i;
1868
1869	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1870		if (adev->uvd.harvest_config & (1 << i))
1871			continue;
1872		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1873		adev->uvd.inst[i].ring.me = i;
1874		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1875	}
1876}
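/*
 * harvest_config carries one bit per instance, set in early_init when the
 * UVD_PG0_CC_UVD_HARVESTING register reports the engine as fused off;
 * harvested instances are consistently skipped throughout this file.
 */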
1877
1878static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1879{
1880	int i, j;
1881
1882	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1883		if (adev->uvd.harvest_config & (1 << j))
1884			continue;
1885		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1886			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1887			adev->uvd.inst[j].ring_enc[i].me = j;
1888		}
1889
1890		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1891	}
1892}
1893
1894static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1895	.set = uvd_v7_0_set_interrupt_state,
1896	.process = uvd_v7_0_process_interrupt,
1897};
1898
1899static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1900{
1901	int i;
1902
1903	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1904		if (adev->uvd.harvest_config & (1 << i))
1905			continue;
1906		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1907		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1908	}
1909}
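/*
 * One interrupt type per ring: the decode ring plus each encode ring,
 * matching the num_enc_rings + 1 sources that sw_init registers through
 * amdgpu_irq_add_id().
 */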
1910
1911const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1912{
1913		.type = AMD_IP_BLOCK_TYPE_UVD,
1914		.major = 7,
1915		.minor = 0,
1916		.rev = 0,
1917		.funcs = &uvd_v7_0_ip_funcs,
1918};
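/*
 * Registration sketch: the SoC setup code adds this IP block during early
 * device init, as done for Vega in soc15.c:
 *
 *	amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
 */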
v5.14.15
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_uvd.h"
 
  28#include "soc15.h"
  29#include "soc15d.h"
  30#include "soc15_common.h"
  31#include "mmsch_v1_0.h"
  32
  33#include "uvd/uvd_7_0_offset.h"
  34#include "uvd/uvd_7_0_sh_mask.h"
  35#include "vce/vce_4_0_offset.h"
  36#include "vce/vce_4_0_default.h"
  37#include "vce/vce_4_0_sh_mask.h"
  38#include "nbif/nbif_6_1_offset.h"
  39#include "mmhub/mmhub_1_0_offset.h"
  40#include "mmhub/mmhub_1_0_sh_mask.h"
  41#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
  42
  43#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
  44#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
  45//UVD_PG0_CC_UVD_HARVESTING
  46#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
  47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
  48
  49#define UVD7_MAX_HW_INSTANCES_VEGA20			2
  50
  51static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  52static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  53static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  54static int uvd_v7_0_start(struct amdgpu_device *adev);
  55static void uvd_v7_0_stop(struct amdgpu_device *adev);
  56static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
  57
  58static int amdgpu_ih_clientid_uvds[] = {
  59	SOC15_IH_CLIENTID_UVD,
  60	SOC15_IH_CLIENTID_UVD1
  61};
  62
  63/**
  64 * uvd_v7_0_ring_get_rptr - get read pointer
  65 *
  66 * @ring: amdgpu_ring pointer
  67 *
  68 * Returns the current hardware read pointer
  69 */
  70static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
  71{
  72	struct amdgpu_device *adev = ring->adev;
  73
  74	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
  75}
  76
  77/**
  78 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
  79 *
  80 * @ring: amdgpu_ring pointer
  81 *
  82 * Returns the current hardware enc read pointer
  83 */
  84static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
  85{
  86	struct amdgpu_device *adev = ring->adev;
  87
  88	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
  89		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
  90	else
  91		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
  92}
  93
  94/**
  95 * uvd_v7_0_ring_get_wptr - get write pointer
  96 *
  97 * @ring: amdgpu_ring pointer
  98 *
  99 * Returns the current hardware write pointer
 100 */
 101static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 102{
 103	struct amdgpu_device *adev = ring->adev;
 104
 105	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
 106}
 107
 108/**
 109 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 110 *
 111 * @ring: amdgpu_ring pointer
 112 *
 113 * Returns the current hardware enc write pointer
 114 */
 115static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 116{
 117	struct amdgpu_device *adev = ring->adev;
 118
 119	if (ring->use_doorbell)
 120		return adev->wb.wb[ring->wptr_offs];
 121
 122	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 123		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
 124	else
 125		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
 126}
 127
 128/**
 129 * uvd_v7_0_ring_set_wptr - set write pointer
 130 *
 131 * @ring: amdgpu_ring pointer
 132 *
 133 * Commits the write pointer to the hardware
 134 */
 135static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 136{
 137	struct amdgpu_device *adev = ring->adev;
 138
 139	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 140}
 141
 142/**
 143 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 144 *
 145 * @ring: amdgpu_ring pointer
 146 *
 147 * Commits the enc write pointer to the hardware
 148 */
 149static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 150{
 151	struct amdgpu_device *adev = ring->adev;
 152
 153	if (ring->use_doorbell) {
 154		/* XXX check if swapping is necessary on BE */
 155		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 156		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 157		return;
 158	}
 159
 160	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 161		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
 162			lower_32_bits(ring->wptr));
 163	else
 164		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
 165			lower_32_bits(ring->wptr));
 166}
 167
 168/**
 169 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 170 *
 171 * @ring: the engine to test on
 172 *
 173 */
 174static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 175{
 176	struct amdgpu_device *adev = ring->adev;
 177	uint32_t rptr;
 178	unsigned i;
 179	int r;
 180
 181	if (amdgpu_sriov_vf(adev))
 182		return 0;
 183
 184	r = amdgpu_ring_alloc(ring, 16);
 185	if (r)
 186		return r;
 187
 188	rptr = amdgpu_ring_get_rptr(ring);
 189
 190	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 191	amdgpu_ring_commit(ring);
 192
 193	for (i = 0; i < adev->usec_timeout; i++) {
 194		if (amdgpu_ring_get_rptr(ring) != rptr)
 195			break;
 196		udelay(1);
 197	}
 198
 199	if (i >= adev->usec_timeout)
 200		r = -ETIMEDOUT;
 201
 202	return r;
 203}
 204
 205/**
 206 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 207 *
 208 * @ring: ring we should submit the msg to
 209 * @handle: session handle to use
 210 * @bo: amdgpu object for which we query the offset
 211 * @fence: optional fence to return
 212 *
 213 * Open up a stream for HW test
 214 */
 215static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 216				       struct amdgpu_bo *bo,
 217				       struct dma_fence **fence)
 218{
 219	const unsigned ib_size_dw = 16;
 220	struct amdgpu_job *job;
 221	struct amdgpu_ib *ib;
 222	struct dma_fence *f = NULL;
 223	uint64_t addr;
 224	int i, r;
 225
 226	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 227					AMDGPU_IB_POOL_DIRECT, &job);
 228	if (r)
 229		return r;
 230
 231	ib = &job->ibs[0];
 232	addr = amdgpu_bo_gpu_offset(bo);
 233
 234	ib->length_dw = 0;
 235	ib->ptr[ib->length_dw++] = 0x00000018;
 236	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 237	ib->ptr[ib->length_dw++] = handle;
 238	ib->ptr[ib->length_dw++] = 0x00000000;
 239	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 240	ib->ptr[ib->length_dw++] = addr;
 241
 242	ib->ptr[ib->length_dw++] = 0x00000014;
 243	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 244	ib->ptr[ib->length_dw++] = 0x0000001c;
 245	ib->ptr[ib->length_dw++] = 0x00000000;
 246	ib->ptr[ib->length_dw++] = 0x00000000;
 247
 248	ib->ptr[ib->length_dw++] = 0x00000008;
 249	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 250
 251	for (i = ib->length_dw; i < ib_size_dw; ++i)
 252		ib->ptr[i] = 0x0;
 253
 254	r = amdgpu_job_submit_direct(job, ring, &f);
 255	if (r)
 256		goto err;
 257
 258	if (fence)
 259		*fence = dma_fence_get(f);
 260	dma_fence_put(f);
 261	return 0;
 262
 263err:
 264	amdgpu_job_free(job);
 265	return r;
 266}
 267
 268/**
 269 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 270 *
 271 * @ring: ring we should submit the msg to
 272 * @handle: session handle to use
 273 * @bo: amdgpu object for which we query the offset
 274 * @fence: optional fence to return
 275 *
 276 * Close up a stream for HW test or if userspace failed to do so
 277 */
 278static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 279					struct amdgpu_bo *bo,
 280					struct dma_fence **fence)
 281{
 282	const unsigned ib_size_dw = 16;
 283	struct amdgpu_job *job;
 284	struct amdgpu_ib *ib;
 285	struct dma_fence *f = NULL;
 286	uint64_t addr;
 287	int i, r;
 288
 289	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 290					AMDGPU_IB_POOL_DIRECT, &job);
 291	if (r)
 292		return r;
 293
 294	ib = &job->ibs[0];
 295	addr = amdgpu_bo_gpu_offset(bo);
 296
 297	ib->length_dw = 0;
 298	ib->ptr[ib->length_dw++] = 0x00000018;
 299	ib->ptr[ib->length_dw++] = 0x00000001;
 300	ib->ptr[ib->length_dw++] = handle;
 301	ib->ptr[ib->length_dw++] = 0x00000000;
 302	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 303	ib->ptr[ib->length_dw++] = addr;
 304
 305	ib->ptr[ib->length_dw++] = 0x00000014;
 306	ib->ptr[ib->length_dw++] = 0x00000002;
 307	ib->ptr[ib->length_dw++] = 0x0000001c;
 308	ib->ptr[ib->length_dw++] = 0x00000000;
 309	ib->ptr[ib->length_dw++] = 0x00000000;
 310
 311	ib->ptr[ib->length_dw++] = 0x00000008;
 312	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 313
 314	for (i = ib->length_dw; i < ib_size_dw; ++i)
 315		ib->ptr[i] = 0x0;
 316
 317	r = amdgpu_job_submit_direct(job, ring, &f);
 318	if (r)
 319		goto err;
 320
 321	if (fence)
 322		*fence = dma_fence_get(f);
 323	dma_fence_put(f);
 324	return 0;
 325
 326err:
 327	amdgpu_job_free(job);
 328	return r;
 329}
 330
 331/**
 332 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 333 *
 334 * @ring: the engine to test on
 335 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 336 *
 337 */
 338static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 339{
 340	struct dma_fence *fence = NULL;
 341	struct amdgpu_bo *bo = NULL;
 342	long r;
 343
 344	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 345				      AMDGPU_GEM_DOMAIN_VRAM,
 346				      &bo, NULL, NULL);
 347	if (r)
 348		return r;
 349
 350	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
 351	if (r)
 352		goto error;
 353
 354	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 355	if (r)
 356		goto error;
 357
 358	r = dma_fence_wait_timeout(fence, false, timeout);
 359	if (r == 0)
 360		r = -ETIMEDOUT;
 361	else if (r > 0)
 362		r = 0;
 363
 364error:
 365	dma_fence_put(fence);
 366	amdgpu_bo_unpin(bo);
 367	amdgpu_bo_unreserve(bo);
 368	amdgpu_bo_unref(&bo);
 369	return r;
 370}
 371
 372static int uvd_v7_0_early_init(void *handle)
 373{
 374	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 375
 376	if (adev->asic_type == CHIP_VEGA20) {
 377		u32 harvest;
 378		int i;
 379
 380		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
 381		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
 382			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
 383			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
 384				adev->uvd.harvest_config |= 1 << i;
 385			}
 386		}
 387		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
 388						 AMDGPU_UVD_HARVEST_UVD1))
 389			/* both instances are harvested, disable the block */
 390			return -ENOENT;
 391	} else {
 392		adev->uvd.num_uvd_inst = 1;
 393	}
 394
 395	if (amdgpu_sriov_vf(adev))
 396		adev->uvd.num_enc_rings = 1;
 397	else
 398		adev->uvd.num_enc_rings = 2;
 399	uvd_v7_0_set_ring_funcs(adev);
 400	uvd_v7_0_set_enc_ring_funcs(adev);
 401	uvd_v7_0_set_irq_funcs(adev);
 402
 403	return 0;
 404}
 405
 406static int uvd_v7_0_sw_init(void *handle)
 407{
 408	struct amdgpu_ring *ring;
 409
 410	int i, j, r;
 411	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 412
 413	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 414		if (adev->uvd.harvest_config & (1 << j))
 415			continue;
 416		/* UVD TRAP */
 417		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 418		if (r)
 419			return r;
 420
 421		/* UVD ENC TRAP */
 422		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 423			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
 424			if (r)
 425				return r;
 426		}
 427	}
 428
 429	r = amdgpu_uvd_sw_init(adev);
 430	if (r)
 431		return r;
 432
 433	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 434		const struct common_firmware_header *hdr;
 435		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 436		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
 437		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 438		adev->firmware.fw_size +=
 439			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 440
 441		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
 442			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
 443			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
 444			adev->firmware.fw_size +=
 445				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 446		}
 447		DRM_INFO("PSP loading UVD firmware\n");
 448	}
 449
 450	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 451		if (adev->uvd.harvest_config & (1 << j))
 452			continue;
 453		if (!amdgpu_sriov_vf(adev)) {
 454			ring = &adev->uvd.inst[j].ring;
 455			sprintf(ring->name, "uvd_%d", ring->me);
 456			r = amdgpu_ring_init(adev, ring, 512,
 457					     &adev->uvd.inst[j].irq, 0,
 458					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 459			if (r)
 460				return r;
 461		}
 462
 463		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 464			ring = &adev->uvd.inst[j].ring_enc[i];
 465			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 466			if (amdgpu_sriov_vf(adev)) {
 467				ring->use_doorbell = true;
 468
 469				/* currently only use the first enconding ring for
 470				 * sriov, so set unused location for other unused rings.
 471				 */
 472				if (i == 0)
 473					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 474				else
 475					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 476			}
 477			r = amdgpu_ring_init(adev, ring, 512,
 478					     &adev->uvd.inst[j].irq, 0,
 479					     AMDGPU_RING_PRIO_DEFAULT, NULL);
 480			if (r)
 481				return r;
 482		}
 483	}
 484
 485	r = amdgpu_uvd_resume(adev);
 486	if (r)
 487		return r;
 488
 489	r = amdgpu_uvd_entity_init(adev);
 490	if (r)
 491		return r;
 492
 493	r = amdgpu_virt_alloc_mm_table(adev);
 494	if (r)
 495		return r;
 496
 497	return r;
 498}
 499
 500static int uvd_v7_0_sw_fini(void *handle)
 501{
 502	int i, j, r;
 503	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 504
 505	amdgpu_virt_free_mm_table(adev);
 506
 507	r = amdgpu_uvd_suspend(adev);
 508	if (r)
 509		return r;
 510
 511	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 512		if (adev->uvd.harvest_config & (1 << j))
 513			continue;
 514		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 515			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 516	}
 517	return amdgpu_uvd_sw_fini(adev);
 518}
 519
 520/**
 521 * uvd_v7_0_hw_init - start and test UVD block
 522 *
 523 * @handle: handle used to pass amdgpu_device pointer
 524 *
 525 * Initialize the hardware, boot up the VCPU and do some testing
 526 */
 527static int uvd_v7_0_hw_init(void *handle)
 528{
 529	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 530	struct amdgpu_ring *ring;
 531	uint32_t tmp;
 532	int i, j, r;
 533
 534	if (amdgpu_sriov_vf(adev))
 535		r = uvd_v7_0_sriov_start(adev);
 536	else
 537		r = uvd_v7_0_start(adev);
 538	if (r)
 539		goto done;
 540
 541	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 542		if (adev->uvd.harvest_config & (1 << j))
 543			continue;
 544		ring = &adev->uvd.inst[j].ring;
 545
 546		if (!amdgpu_sriov_vf(adev)) {
 547			r = amdgpu_ring_test_helper(ring);
 548			if (r)
 549				goto done;
 550
 551			r = amdgpu_ring_alloc(ring, 10);
 552			if (r) {
 553				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
 554				goto done;
 555			}
 556
 557			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 558				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
 559			amdgpu_ring_write(ring, tmp);
 560			amdgpu_ring_write(ring, 0xFFFFF);
 561
 562			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 563				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
 564			amdgpu_ring_write(ring, tmp);
 565			amdgpu_ring_write(ring, 0xFFFFF);
 566
 567			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 568				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
 569			amdgpu_ring_write(ring, tmp);
 570			amdgpu_ring_write(ring, 0xFFFFF);
 571
 572			/* Clear timeout status bits */
 573			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 574				mmUVD_SEMA_TIMEOUT_STATUS), 0));
 575			amdgpu_ring_write(ring, 0x8);
 576
 577			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 578				mmUVD_SEMA_CNTL), 0));
 579			amdgpu_ring_write(ring, 3);
 580
 581			amdgpu_ring_commit(ring);
 582		}
 583
 584		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 585			ring = &adev->uvd.inst[j].ring_enc[i];
 586			r = amdgpu_ring_test_helper(ring);
 587			if (r)
 588				goto done;
 589		}
 590	}
 591done:
 592	if (!r)
 593		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
 594
 595	return r;
 596}
 597
 598/**
 599 * uvd_v7_0_hw_fini - stop the hardware block
 600 *
 601 * @handle: handle used to pass amdgpu_device pointer
 602 *
 603 * Stop the UVD block, mark ring as not ready any more
 604 */
 605static int uvd_v7_0_hw_fini(void *handle)
 606{
 607	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 608
 
 
 609	if (!amdgpu_sriov_vf(adev))
 610		uvd_v7_0_stop(adev);
 611	else {
 612		/* full access mode, so don't touch any UVD register */
 613		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 614	}
 615
 616	return 0;
 617}
 618
 619static int uvd_v7_0_suspend(void *handle)
 620{
 621	int r;
 622	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 623
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 624	r = uvd_v7_0_hw_fini(adev);
 625	if (r)
 626		return r;
 627
 628	return amdgpu_uvd_suspend(adev);
 629}
 630
 631static int uvd_v7_0_resume(void *handle)
 632{
 633	int r;
 634	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 635
 636	r = amdgpu_uvd_resume(adev);
 637	if (r)
 638		return r;
 639
 640	return uvd_v7_0_hw_init(adev);
 641}
 642
 643/**
 644 * uvd_v7_0_mc_resume - memory controller programming
 645 *
 646 * @adev: amdgpu_device pointer
 647 *
 648 * Let the UVD memory controller know it's offsets
 649 */
 650static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 651{
 652	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 653	uint32_t offset;
 654	int i;
 655
 656	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 657		if (adev->uvd.harvest_config & (1 << i))
 658			continue;
 659		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 660			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 661				i == 0 ?
 662				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
 663				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
 664			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 665				i == 0 ?
 666				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
 667				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
 668			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 669			offset = 0;
 670		} else {
 671			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 672				lower_32_bits(adev->uvd.inst[i].gpu_addr));
 673			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 674				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 675			offset = size;
 676			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
 677					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 678		}
 679
 680		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
 681
 682		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 683				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 684		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 685				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 686		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 687		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 688
 689		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 690				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 691		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 692				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 693		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 694		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
 695				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 696
 697		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
 698				adev->gfx.config.gb_addr_config);
 699		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
 700				adev->gfx.config.gb_addr_config);
 701		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
 702				adev->gfx.config.gb_addr_config);
 703
 704		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 705	}
 706}
 707
 708static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 709				struct amdgpu_mm_table *table)
 710{
 711	uint32_t data = 0, loop;
 712	uint64_t addr = table->gpu_addr;
 713	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 714	uint32_t size;
 715	int i;
 716
 717	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 718
 719	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
 720	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
 721	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 722
 723	/* 2, update vmid of descriptor */
 724	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 725	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 726	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
 727	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 728
 729	/* 3, notify mmsch about the size of this descriptor */
 730	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 731
 732	/* 4, set resp to zero */
 733	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 734
 735	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 736		if (adev->uvd.harvest_config & (1 << i))
 737			continue;
 738		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 739		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 740		adev->uvd.inst[i].ring_enc[0].wptr = 0;
 741		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
 742	}
 743	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 744	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 745
 746	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 747	loop = 1000;
 748	while ((data & 0x10000002) != 0x10000002) {
 749		udelay(10);
 750		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 751		loop--;
 752		if (!loop)
 753			break;
 754	}
 755
 756	if (!loop) {
 757		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 758		return -EBUSY;
 759	}
 760
 761	return 0;
 762}
 763
 764static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 765{
 766	struct amdgpu_ring *ring;
 767	uint32_t offset, size, tmp;
 768	uint32_t table_size = 0;
 769	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
 770	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
 771	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
 772	struct mmsch_v1_0_cmd_end end = { {0} };
 773	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 774	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
 775	uint8_t i = 0;
 776
 777	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 778	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
 779	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
 780	end.cmd_header.command_type = MMSCH_COMMAND__END;
 781
 782	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
 783		header->version = MMSCH_VERSION;
 784		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
 785
 786		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
 787			header->uvd_table_offset = header->header_size;
 788		else
 789			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
 790
 791		init_table += header->uvd_table_offset;
 792
 793		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 794			if (adev->uvd.harvest_config & (1 << i))
 795				continue;
 796			ring = &adev->uvd.inst[i].ring;
 797			ring->wptr = 0;
 798			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 799
 800			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 801							   0xFFFFFFFF, 0x00000004);
 802			/* mc resume*/
 803			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 804				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 805							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 806							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
 807				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 808							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 809							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
 810				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 811				offset = 0;
 812			} else {
 813				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 814							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
 815				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 816							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
 817				offset = size;
 818				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 819							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 820
 821			}
 822
 823			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 824
 825			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 826						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 827			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 828						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 829			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 830			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 831
 832			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 833						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 834			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 835						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 836			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 837			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
 838						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 839
 840			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
 841			/* mc resume end*/
 842
 843			/* disable clock gating */
 844			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
 845							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
 846
 847			/* disable interupt */
 848			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 849							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 850
 851			/* stall UMC and register bus before resetting VCPU */
 852			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 853							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 854							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 855
 856			/* put LMI, VCPU, RBC etc... into reset */
 857			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 858						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 859							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 860							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 861							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 862							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 863							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 864							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 865							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
 866
 867			/* initialize UVD memory controller */
 868			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
 869						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 870							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 871							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 872							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 873							       UVD_LMI_CTRL__REQ_MODE_MASK |
 874							       0x00100000L));
 875
 876			/* take all subblocks out of reset, except VCPU */
 877			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 878						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 879
 880			/* enable VCPU clock */
 881			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
 882						    UVD_VCPU_CNTL__CLK_EN_MASK);
 883
 884			/* enable master interrupt */
 885			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 886							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 887							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 888
 889			/* clear the bit 4 of UVD_STATUS */
 890			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 891							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
 892
 893			/* force RBC into idle state */
 894			size = order_base_2(ring->ring_size);
 895			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
 896			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 897			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
 898
 899			ring = &adev->uvd.inst[i].ring_enc[0];
 900			ring->wptr = 0;
 901			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
 902			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
 903			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
 904
 905			/* boot up the VCPU */
 906			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
 907
 908			/* enable UMC */
 909			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 910											   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
 911
 912			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
 913		}
 914		/* add end packet */
 915		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 916		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 917		header->uvd_table_size = table_size;
 918
 919	}
 920	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
 921}
 922
 923/**
 924 * uvd_v7_0_start - start UVD block
 925 *
 926 * @adev: amdgpu_device pointer
 927 *
 928 * Setup and start the UVD block
 929 */
 930static int uvd_v7_0_start(struct amdgpu_device *adev)
 931{
 932	struct amdgpu_ring *ring;
 933	uint32_t rb_bufsz, tmp;
 934	uint32_t lmi_swap_cntl;
 935	uint32_t mp_swap_cntl;
 936	int i, j, k, r;
 937
 938	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 939		if (adev->uvd.harvest_config & (1 << k))
 940			continue;
 941		/* disable DPG */
 942		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 943				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 944	}
 945
 946	/* disable byte swapping */
 947	lmi_swap_cntl = 0;
 948	mp_swap_cntl = 0;
 949
 950	uvd_v7_0_mc_resume(adev);
 951
 952	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 953		if (adev->uvd.harvest_config & (1 << k))
 954			continue;
 955		ring = &adev->uvd.inst[k].ring;
 956		/* disable clock gating */
 957		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
 958				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 959
 960		/* disable interupt */
 961		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
 962				~UVD_MASTINT_EN__VCPU_EN_MASK);
 963
 964		/* stall UMC and register bus before resetting VCPU */
 965		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
 966				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 967				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 968		mdelay(1);
 969
 970		/* put LMI, VCPU, RBC etc... into reset */
 971		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
 972			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 973			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 974			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 975			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 976			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 977			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 978			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 979			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
 980		mdelay(5);
 981
 982		/* initialize UVD memory controller */
 983		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
 984			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 985			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 986			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 987			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 988			UVD_LMI_CTRL__REQ_MODE_MASK |
 989			0x00100000L);
 990
 991#ifdef __BIG_ENDIAN
 992		/* swap (8 in 32) RB and IB */
 993		lmi_swap_cntl = 0xa;
 994		mp_swap_cntl = 0;
 995#endif
 996		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 997		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 998
 999		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1000		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1001		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1002		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1003		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1004		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1005
1006		/* take all subblocks out of reset, except VCPU */
1007		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1008				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1009		mdelay(5);
1010
1011		/* enable VCPU clock */
1012		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1013				UVD_VCPU_CNTL__CLK_EN_MASK);
1014
1015		/* enable UMC */
1016		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1017				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1018
1019		/* boot up the VCPU */
1020		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1021		mdelay(10);
1022
1023		for (i = 0; i < 10; ++i) {
1024			uint32_t status;
1025
1026			for (j = 0; j < 100; ++j) {
1027				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1028				if (status & 2)
1029					break;
1030				mdelay(10);
1031			}
1032			r = 0;
1033			if (status & 2)
1034				break;
1035
1036			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1037			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1038					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1039					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1040			mdelay(10);
1041			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1042					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1043			mdelay(10);
1044			r = -1;
1045		}
1046
1047		if (r) {
1048			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1049			return r;
1050		}
1051		/* enable master interrupt */
1052		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1053			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1054			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1055
1056		/* clear the bit 4 of UVD_STATUS */
1057		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1058				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1059
1060		/* force RBC into idle state */
1061		rb_bufsz = order_base_2(ring->ring_size);
1062		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1063		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1064		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1065		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1066		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1067		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1068		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1069
1070		/* set the write pointer delay */
1071		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1072
1073		/* set the wb address */
1074		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1075				(upper_32_bits(ring->gpu_addr) >> 2));
1076
1077		/* program the RB_BASE for ring buffer */
1078		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1079				lower_32_bits(ring->gpu_addr));
1080		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1081				upper_32_bits(ring->gpu_addr));
1082
1083		/* Initialize the ring buffer's read and write pointers */
1084		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1085
1086		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1087		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1088				lower_32_bits(ring->wptr));
1089
1090		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1091				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1092
1093		ring = &adev->uvd.inst[k].ring_enc[0];
1094		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1095		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1096		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1097		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1098		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1099
1100		ring = &adev->uvd.inst[k].ring_enc[1];
1101		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1102		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1103		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1104		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1105		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1106	}
1107	return 0;
1108}
1109
1110/**
1111 * uvd_v7_0_stop - stop UVD block
1112 *
1113 * @adev: amdgpu_device pointer
1114 *
1115 * stop the UVD block
1116 */
1117static void uvd_v7_0_stop(struct amdgpu_device *adev)
1118{
1119	uint8_t i = 0;
1120
1121	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1122		if (adev->uvd.harvest_config & (1 << i))
1123			continue;
1124		/* force RBC into idle state */
1125		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1126
1127		/* Stall UMC and register bus before resetting VCPU */
1128		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1129				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1130				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1131		mdelay(1);
1132
1133		/* put VCPU into reset */
1134		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1135				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1136		mdelay(5);
1137
1138		/* disable VCPU clock */
1139		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1140
1141		/* Unstall UMC and register bus */
1142		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1143				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1144	}
1145}
1146
1147/**
1148 * uvd_v7_0_ring_emit_fence - emit an fence & trap command
1149 *
1150 * @ring: amdgpu_ring pointer
1151 * @addr: address
1152 * @seq: sequence number
1153 * @flags: fence related flags
1154 *
1155 * Write a fence and a trap command to the ring.
1156 */
1157static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1158				     unsigned flags)
1159{
1160	struct amdgpu_device *adev = ring->adev;
1161
1162	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1163
1164	amdgpu_ring_write(ring,
1165		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1166	amdgpu_ring_write(ring, seq);
1167	amdgpu_ring_write(ring,
1168		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1169	amdgpu_ring_write(ring, addr & 0xffffffff);
1170	amdgpu_ring_write(ring,
1171		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1172	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1173	amdgpu_ring_write(ring,
1174		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1175	amdgpu_ring_write(ring, 0);
1176
1177	amdgpu_ring_write(ring,
1178		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1179	amdgpu_ring_write(ring, 0);
1180	amdgpu_ring_write(ring,
1181		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1182	amdgpu_ring_write(ring, 0);
1183	amdgpu_ring_write(ring,
1184		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1185	amdgpu_ring_write(ring, 2);
1186}
1187
1188/**
1189 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1190 *
1191 * @ring: amdgpu_ring pointer
1192 * @addr: address
1193 * @seq: sequence number
1194 * @flags: fence related flags
1195 *
1196 * Write enc a fence and a trap command to the ring.
1197 */
1198static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1199			u64 seq, unsigned flags)
1200{
1201
1202	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1203
1204	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1205	amdgpu_ring_write(ring, addr);
1206	amdgpu_ring_write(ring, upper_32_bits(addr));
1207	amdgpu_ring_write(ring, seq);
1208	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1209}
1210
1211/**
1212 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1213 *
1214 * @ring: amdgpu_ring pointer
1215 */
1216static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1217{
1218	/* The firmware doesn't seem to like touching registers at this point. */
1219}
1220
1221/**
1222 * uvd_v7_0_ring_test_ring - register write test
1223 *
1224 * @ring: amdgpu_ring pointer
1225 *
1226 * Test if we can successfully write to the context register
1227 */
1228static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1229{
1230	struct amdgpu_device *adev = ring->adev;
1231	uint32_t tmp = 0;
1232	unsigned i;
1233	int r;
1234
1235	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1236	r = amdgpu_ring_alloc(ring, 3);
1237	if (r)
1238		return r;
1239
1240	amdgpu_ring_write(ring,
1241		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1242	amdgpu_ring_write(ring, 0xDEADBEEF);
1243	amdgpu_ring_commit(ring);
1244	for (i = 0; i < adev->usec_timeout; i++) {
1245		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1246		if (tmp == 0xDEADBEEF)
1247			break;
1248		udelay(1);
1249	}
1250
1251	if (i >= adev->usec_timeout)
1252		r = -ETIMEDOUT;
1253
1254	return r;
1255}
1256
1257/**
1258 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1259 *
1260 * @p: the CS parser with the IBs
1261 * @ib_idx: which IB to patch
 
1262 *
1263 */
1264static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1265					   uint32_t ib_idx)
 
1266{
1267	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1268	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1269	unsigned i;
1270
1271	/* No patching necessary for the first instance */
1272	if (!ring->me)
1273		return 0;
1274
1275	for (i = 0; i < ib->length_dw; i += 2) {
1276		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1277
1278		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1279		reg += p->adev->reg_offset[UVD_HWIP][1][1];
1280
1281		amdgpu_set_ib_value(p, ib_idx, i, reg);
1282	}
1283	return 0;
1284}
1285
1286/**
1287 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1288 *
1289 * @ring: amdgpu_ring pointer
1290 * @job: job to retrieve vmid from
1291 * @ib: indirect buffer to execute
1292 * @flags: unused
1293 *
1294 * Write ring commands to execute the indirect buffer
1295 */
1296static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1297				  struct amdgpu_job *job,
1298				  struct amdgpu_ib *ib,
1299				  uint32_t flags)
1300{
1301	struct amdgpu_device *adev = ring->adev;
1302	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1303
1304	amdgpu_ring_write(ring,
1305		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1306	amdgpu_ring_write(ring, vmid);
1307
1308	amdgpu_ring_write(ring,
1309		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1310	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1311	amdgpu_ring_write(ring,
1312		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1313	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1314	amdgpu_ring_write(ring,
1315		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1316	amdgpu_ring_write(ring, ib->length_dw);
1317}
1318
1319/**
1320 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1321 *
1322 * @ring: amdgpu_ring pointer
1323 * @job: job to retrive vmid from
1324 * @ib: indirect buffer to execute
1325 * @flags: unused
1326 *
1327 * Write enc ring commands to execute the indirect buffer
1328 */
1329static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1330					struct amdgpu_job *job,
1331					struct amdgpu_ib *ib,
1332					uint32_t flags)
1333{
1334	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1335
1336	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1337	amdgpu_ring_write(ring, vmid);
1338	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1339	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1340	amdgpu_ring_write(ring, ib->length_dw);
1341}
1342
1343static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1344				    uint32_t reg, uint32_t val)
1345{
1346	struct amdgpu_device *adev = ring->adev;
1347
1348	amdgpu_ring_write(ring,
1349		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1350	amdgpu_ring_write(ring, reg << 2);
1351	amdgpu_ring_write(ring,
1352		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1353	amdgpu_ring_write(ring, val);
1354	amdgpu_ring_write(ring,
1355		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1356	amdgpu_ring_write(ring, 8);
1357}
1358
1359static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1360					uint32_t val, uint32_t mask)
1361{
1362	struct amdgpu_device *adev = ring->adev;
1363
1364	amdgpu_ring_write(ring,
1365		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1366	amdgpu_ring_write(ring, reg << 2);
1367	amdgpu_ring_write(ring,
1368		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1369	amdgpu_ring_write(ring, val);
1370	amdgpu_ring_write(ring,
1371		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1372	amdgpu_ring_write(ring, mask);
1373	amdgpu_ring_write(ring,
1374		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1375	amdgpu_ring_write(ring, 12);
1376}
1377
1378static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1379					unsigned vmid, uint64_t pd_addr)
1380{
1381	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1382	uint32_t data0, data1, mask;
1383
1384	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1385
1386	/* wait for reg writes */
1387	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1388	data1 = lower_32_bits(pd_addr);
1389	mask = 0xffffffff;
1390	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1391}
1392
1393static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1394{
1395	struct amdgpu_device *adev = ring->adev;
1396	int i;
1397
1398	WARN_ON(ring->wptr % 2 || count % 2);
1399
1400	for (i = 0; i < count / 2; i++) {
1401		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1402		amdgpu_ring_write(ring, 0);
1403	}
1404}
1405
1406static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1407{
1408	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1409}
1410
1411static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1412					    uint32_t reg, uint32_t val,
1413					    uint32_t mask)
1414{
1415	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1416	amdgpu_ring_write(ring,	reg << 2);
1417	amdgpu_ring_write(ring, mask);
1418	amdgpu_ring_write(ring, val);
1419}
1420
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

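/**
 * uvd_v7_0_enc_ring_emit_wreg - emit a register write on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword aligned)
 * @val: value to write
 *
 * Emits HEVC_ENC_CMD_REG_WRITE followed by the byte address and the
 * value to write.
 */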
static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);
	/* no ring context in this callback; the first instance is
	 * assumed while this block stays compiled out (it was never
	 * converted to multiple instances)
	 */
	int inst = 0;

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, inst, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[inst].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[inst].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* first instance assumed, as in uvd_v7_0_check_soft_reset() */
	if (!adev->uvd.inst[0].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	/* first instance assumed, as in uvd_v7_0_check_soft_reset() */
	if (!adev->uvd.inst[0].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[0].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* first instance assumed, as in uvd_v7_0_check_soft_reset() */
	if (!adev->uvd.inst[0].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

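/**
 * uvd_v7_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Maps the IH client id to the UVD instance and signals fence
 * completion on the ring that corresponds to the interrupt source id.
 */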
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:	/* decode ring */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:	/* first encode ring */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:	/* second encode ring, not used under SR-IOV */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;
	/* no ring context in these handlers; the first instance is
	 * assumed while this block stays compiled out
	 */
	int inst = 0;

	data = RREG32_SOC15(UVD, inst, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, inst, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, inst, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;
	/* first instance assumed, as in uvd_v7_0_set_sw_clock_gating() */
	int inst = 0;

	data = RREG32_SOC15(UVD, inst, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, inst, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, inst, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* first instance assumed; no ring context in this callback */
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

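/**
 * uvd_v7_0_set_clockgating_state - set the UVD clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: requested clockgating state
 *
 * Intentionally a no-op; the callback only has to exist so that the
 * common code can invoke it during driver unload.
 */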
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

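/**
 * uvd_v7_0_set_ring_funcs - set decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the decode ring callbacks for every UVD instance that is
 * not fuse-harvested and records the instance index in ring->me.
 */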
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

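/**
 * uvd_v7_0_set_enc_ring_funcs - set encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the encode ring callbacks for every encode ring of every
 * UVD instance that is not fuse-harvested.
 */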
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

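/**
 * uvd_v7_0_set_irq_funcs - set interrupt handling functions
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the interrupt handlers; each instance exposes one
 * interrupt type per encode ring plus one for the decode ring.
 */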
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};