/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

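/*
 * Harvesting fuse register for UVD, defined locally since it is not part
 * of the generated register headers. A set UVD_DISABLE bit means that
 * UVD instance was fused off (harvested).
 */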
#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20			2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

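/* IH (interrupt handler) client ID for each UVD instance */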
static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

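	/* the first enc ring uses RB_RPTR, the second one RB_RPTR2 */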
	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

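	/* with doorbells the CPU-side wptr shadow lives in the writeback buffer */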
	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

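	/* the SR-IOV guest cannot exercise the ring registers directly, so
	 * skip the test and report success */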
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object the message references
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

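	/* build a minimal create message: session info, task info and an
	 * initialize op, zero-padded up to ib_size_dw */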
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object the message references
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

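	/* same layout as the create message, but finishing with a
	 * close-session op instead of an initialize op */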
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

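	/* scratch buffer referenced by both the create and the destroy message */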
	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

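	/* only one encode ring is exposed under SR-IOV; bare metal uses two */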
	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

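	/* with PSP firmware loading, register the UVD ucode(s) so the PSP puts
	 * them into its reserved memory (TMR); uvd_v7_0_mc_resume() later
	 * points the VCPU cache at those TMR addresses */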
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only the first encoding ring is
				 * used under SR-IOV, so park the other rings
				 * at an unused doorbell location.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d) failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

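			/* program the semaphore timeout registers through
			 * the ring itself (PACKET0 register writes) */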
			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.sched.ready = false;
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

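	/* each instance gets three VCPU cache windows: window 0 maps the
	 * firmware image (from the PSP TMR or from our own BO), window 1 the
	 * heap and window 2 the stack plus the per-session context area */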
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

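/*
 * Hand the prepared init table over to the MMSCH (multimedia scheduler)
 * and poll its mailbox until the handshake completes; the MMSCH then
 * programs the multimedia engines on behalf of the SR-IOV guest.
 */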
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

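/*
 * Under SR-IOV the guest may not program most UVD registers directly.
 * Instead, the register writes/polls below are recorded into an MMSCH v1
 * init table in GPU memory, which the MMSCH firmware replays once
 * uvd_v7_0_mmsch_start() kicks it off.
 */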
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

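		/* wait for the VCPU to report ready in UVD_STATUS; if it does
		 * not, pulse a VCPU soft reset and retry, up to 10 attempts */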
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to emit
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

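	/* the fence is emitted as two GPCOM packets: the first (cmd 0) makes
	 * the VCPU write the sequence number at addr, the second (cmd 2)
	 * raises the trap interrupt */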
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to emit
 * @flags: AMDGPU_FENCE_FLAG_* flags for this fence
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

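	/* the IB was written against the first instance's register range;
	 * rebase every register offset into this instance's range */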
	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve the vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve the vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

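/*
 * Register writes and waits on the decode ring go through the GPCOM
 * mailbox: DATA0 carries the register byte offset, DATA1 the value (plus
 * GP_SCRATCH8 as the mask for waits), and the CMD write triggers the
 * operation (8 = register write, 12 = register wait).
 */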
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

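	/* src_id 124 is the UVD system message interrupt (decode ring);
	 * 119 and 120 are the general purpose interrupts of enc rings 0/1 */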
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

1580#if 0
1581static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1582{
1583	uint32_t data, data1, data2, suvd_flags;
1584
1585	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1586	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1587	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1588
1589	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1590		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1591
1592	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1593		     UVD_SUVD_CGC_GATE__SIT_MASK |
1594		     UVD_SUVD_CGC_GATE__SMP_MASK |
1595		     UVD_SUVD_CGC_GATE__SCM_MASK |
1596		     UVD_SUVD_CGC_GATE__SDB_MASK;
1597
1598	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1599		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1600		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1601
1602	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1603			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1604			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1605			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1606			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1607			UVD_CGC_CTRL__SYS_MODE_MASK |
1608			UVD_CGC_CTRL__UDEC_MODE_MASK |
1609			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1610			UVD_CGC_CTRL__REGS_MODE_MASK |
1611			UVD_CGC_CTRL__RBC_MODE_MASK |
1612			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1613			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1614			UVD_CGC_CTRL__IDCT_MODE_MASK |
1615			UVD_CGC_CTRL__MPRD_MODE_MASK |
1616			UVD_CGC_CTRL__MPC_MODE_MASK |
1617			UVD_CGC_CTRL__LBSI_MODE_MASK |
1618			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1619			UVD_CGC_CTRL__WCB_MODE_MASK |
1620			UVD_CGC_CTRL__VCPU_MODE_MASK |
1621			UVD_CGC_CTRL__JPEG_MODE_MASK |
1622			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1623			UVD_CGC_CTRL__SCPU_MODE_MASK);
1624	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1625			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1626			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1627			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1628			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1629	data1 |= suvd_flags;
1630
1631	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1632	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1633	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1634	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1635}
1636
1637static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1638{
1639	uint32_t data, data1, cgc_flags, suvd_flags;
1640
1641	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1642	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1643
1644	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1645		UVD_CGC_GATE__UDEC_MASK |
1646		UVD_CGC_GATE__MPEG2_MASK |
1647		UVD_CGC_GATE__RBC_MASK |
1648		UVD_CGC_GATE__LMI_MC_MASK |
1649		UVD_CGC_GATE__IDCT_MASK |
1650		UVD_CGC_GATE__MPRD_MASK |
1651		UVD_CGC_GATE__MPC_MASK |
1652		UVD_CGC_GATE__LBSI_MASK |
1653		UVD_CGC_GATE__LRBBM_MASK |
1654		UVD_CGC_GATE__UDEC_RE_MASK |
1655		UVD_CGC_GATE__UDEC_CM_MASK |
1656		UVD_CGC_GATE__UDEC_IT_MASK |
1657		UVD_CGC_GATE__UDEC_DB_MASK |
1658		UVD_CGC_GATE__UDEC_MP_MASK |
1659		UVD_CGC_GATE__WCB_MASK |
1660		UVD_CGC_GATE__VCPU_MASK |
1661		UVD_CGC_GATE__SCPU_MASK |
1662		UVD_CGC_GATE__JPEG_MASK |
1663		UVD_CGC_GATE__JPEG2_MASK;
1664
1665	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1666				UVD_SUVD_CGC_GATE__SIT_MASK |
1667				UVD_SUVD_CGC_GATE__SMP_MASK |
1668				UVD_SUVD_CGC_GATE__SCM_MASK |
1669				UVD_SUVD_CGC_GATE__SDB_MASK;
1670
1671	data |= cgc_flags;
1672	data1 |= suvd_flags;
1673
1674	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
1675	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1676}
1677
1678static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1679{
1680	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1681
1682	if (enable)
1683		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1684			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1685	else
1686		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1687			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1688
1689	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1690}
1691
1692
1693static int uvd_v7_0_set_clockgating_state(void *handle,
1694					  enum amd_clockgating_state state)
1695{
1696	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1697	bool enable = (state == AMD_CG_STATE_GATE);
1698
1699	uvd_v7_0_set_bypass_mode(adev, enable);
1700
1701	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1702		return 0;
1703
1704	if (enable) {
1705		/* disable HW gating and enable SW gating */
1706		uvd_v7_0_set_sw_clock_gating(adev);
1707	} else {
1708		/* wait for STATUS to clear */
1709		if (uvd_v7_0_wait_for_idle(handle))
1710			return -EBUSY;
1711
1712		/* enable HW gates because UVD is idle */
1713		/* uvd_v7_0_set_hw_clock_gating(adev); */
1714	}
1715
1716	return 0;
1717}
1718
1719static int uvd_v7_0_set_powergating_state(void *handle,
1720					  enum amd_powergating_state state)
1721{
1722	/* This doesn't actually powergate the UVD block.
1723	 * That's done in the dpm code via the SMC.  This
1724	 * just re-inits the block as necessary.  The actual
1725	 * gating still happens in the dpm code.  We should
1726	 * revisit this when there is a cleaner line between
1727	 * the smc and the hw blocks
1728	 */
1729	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730
1731	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1732		return 0;
1733
1734	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1735
1736	if (state == AMD_PG_STATE_GATE) {
1737		uvd_v7_0_stop(adev);
1738		return 0;
1739	} else {
1740		return uvd_v7_0_start(adev);
1741	}
1742}
1743#endif
1744
1745static int uvd_v7_0_set_clockgating_state(void *handle,
1746					  enum amd_clockgating_state state)
1747{
1748	/* needed for driver unload */
1749	return 0;
1750}
1751
1752const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1753	.name = "uvd_v7_0",
1754	.early_init = uvd_v7_0_early_init,
1755	.late_init = NULL,
1756	.sw_init = uvd_v7_0_sw_init,
1757	.sw_fini = uvd_v7_0_sw_fini,
1758	.hw_init = uvd_v7_0_hw_init,
1759	.hw_fini = uvd_v7_0_hw_fini,
1760	.suspend = uvd_v7_0_suspend,
1761	.resume = uvd_v7_0_resume,
1762	.is_idle = NULL /* uvd_v7_0_is_idle */,
1763	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1764	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1765	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1766	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1767	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1768	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1769	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1770};
1771
1772static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1773	.type = AMDGPU_RING_TYPE_UVD,
1774	.align_mask = 0xf,
1775	.support_64bit_ptrs = false,
1776	.no_user_fence = true,
1777	.vmhub = AMDGPU_MMHUB_0,
1778	.get_rptr = uvd_v7_0_ring_get_rptr,
1779	.get_wptr = uvd_v7_0_ring_get_wptr,
1780	.set_wptr = uvd_v7_0_ring_set_wptr,
1781	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1782	.emit_frame_size =
1783		6 + /* hdp invalidate */
1784		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1785		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1786		8 + /* uvd_v7_0_ring_emit_vm_flush */
1787		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1788	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1789	.emit_ib = uvd_v7_0_ring_emit_ib,
1790	.emit_fence = uvd_v7_0_ring_emit_fence,
1791	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1792	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1793	.test_ring = uvd_v7_0_ring_test_ring,
1794	.test_ib = amdgpu_uvd_ring_test_ib,
1795	.insert_nop = uvd_v7_0_ring_insert_nop,
1796	.pad_ib = amdgpu_ring_generic_pad_ib,
1797	.begin_use = amdgpu_uvd_ring_begin_use,
1798	.end_use = amdgpu_uvd_ring_end_use,
1799	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1800	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1801	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1802};
1803
1804static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1805	.type = AMDGPU_RING_TYPE_UVD_ENC,
1806	.align_mask = 0x3f,
1807	.nop = HEVC_ENC_CMD_NO_OP,
1808	.support_64bit_ptrs = false,
1809	.no_user_fence = true,
1810	.vmhub = AMDGPU_MMHUB_0,
1811	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1812	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1813	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1814	.emit_frame_size =
1815		3 + 3 + /* hdp flush / invalidate */
1816		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1817		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1818		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1819		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1820		1, /* uvd_v7_0_enc_ring_insert_end */
1821	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1822	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1823	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1824	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1825	.test_ring = uvd_v7_0_enc_ring_test_ring,
1826	.test_ib = uvd_v7_0_enc_ring_test_ib,
1827	.insert_nop = amdgpu_ring_insert_nop,
1828	.insert_end = uvd_v7_0_enc_ring_insert_end,
1829	.pad_ib = amdgpu_ring_generic_pad_ib,
1830	.begin_use = amdgpu_uvd_ring_begin_use,
1831	.end_use = amdgpu_uvd_ring_end_use,
1832	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1833	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1834	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1835};
1836
1837static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1838{
1839	int i;
1840
1841	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1842		if (adev->uvd.harvest_config & (1 << i))
1843			continue;
1844		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1845		adev->uvd.inst[i].ring.me = i;
1846		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1847	}
1848}
1849
1850static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1851{
1852	int i, j;
1853
1854	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1855		if (adev->uvd.harvest_config & (1 << j))
1856			continue;
1857		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1858			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1859			adev->uvd.inst[j].ring_enc[i].me = j;
1860		}
1861
1862		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1863	}
1864}
1865
1866static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1867	.set = uvd_v7_0_set_interrupt_state,
1868	.process = uvd_v7_0_process_interrupt,
1869};
1870
1871static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1872{
1873	int i;
1874
1875	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1876		if (adev->uvd.harvest_config & (1 << i))
1877			continue;
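    		/* one interrupt type per ENC ring plus one for the decode ring */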
1878		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1879		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1880	}
1881}
1882
1883const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1884{
1885		.type = AMD_IP_BLOCK_TYPE_UVD,
1886		.major = 7,
1887		.minor = 0,
1888		.rev = 0,
1889		.funcs = &uvd_v7_0_ip_funcs,
1890};
v4.17
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
  26#include "amdgpu.h"
  27#include "amdgpu_uvd.h"
  28#include "soc15.h"
  29#include "soc15d.h"
  30#include "soc15_common.h"
  31#include "mmsch_v1_0.h"
  32
  33#include "uvd/uvd_7_0_offset.h"
  34#include "uvd/uvd_7_0_sh_mask.h"
  35#include "vce/vce_4_0_offset.h"
  36#include "vce/vce_4_0_default.h"
  37#include "vce/vce_4_0_sh_mask.h"
  38#include "nbif/nbif_6_1_offset.h"
  39#include "hdp/hdp_4_0_offset.h"
  40#include "mmhub/mmhub_1_0_offset.h"
  41#include "mmhub/mmhub_1_0_sh_mask.h"
  42
  43static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  44static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  45static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  46static int uvd_v7_0_start(struct amdgpu_device *adev);
  47static void uvd_v7_0_stop(struct amdgpu_device *adev);
  48static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
  49
  50/**
  51 * uvd_v7_0_ring_get_rptr - get read pointer
  52 *
  53 * @ring: amdgpu_ring pointer
  54 *
  55 * Returns the current hardware read pointer
  56 */
  57static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
  58{
  59	struct amdgpu_device *adev = ring->adev;
  60
  61	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
  62}
  63
  64/**
  65 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
  66 *
  67 * @ring: amdgpu_ring pointer
  68 *
  69 * Returns the current hardware enc read pointer
  70 */
  71static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
  72{
  73	struct amdgpu_device *adev = ring->adev;
  74
  75	if (ring == &adev->uvd.ring_enc[0])
  76		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
  77	else
  78		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
  79}
  80
  81/**
  82 * uvd_v7_0_ring_get_wptr - get write pointer
  83 *
  84 * @ring: amdgpu_ring pointer
  85 *
  86 * Returns the current hardware write pointer
  87 */
  88static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
  89{
  90	struct amdgpu_device *adev = ring->adev;
  91
  92	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
  93}
  94
  95/**
  96 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
  97 *
  98 * @ring: amdgpu_ring pointer
  99 *
 100 * Returns the current hardware enc write pointer
 101 */
 102static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 103{
 104	struct amdgpu_device *adev = ring->adev;
 105
 106	if (ring->use_doorbell)
 107		return adev->wb.wb[ring->wptr_offs];
 108
 109	if (ring == &adev->uvd.ring_enc[0])
 110		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
 111	else
 112		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
 113}
 114
 115/**
 116 * uvd_v7_0_ring_set_wptr - set write pointer
 117 *
 118 * @ring: amdgpu_ring pointer
 119 *
 120 * Commits the write pointer to the hardware
 121 */
 122static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 123{
 124	struct amdgpu_device *adev = ring->adev;
 125
 126	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 127}
 128
 129/**
 130 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 131 *
 132 * @ring: amdgpu_ring pointer
 133 *
 134 * Commits the enc write pointer to the hardware
 135 */
 136static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 137{
 138	struct amdgpu_device *adev = ring->adev;
 139
 140	if (ring->use_doorbell) {
 141		/* XXX check if swapping is necessary on BE */
 142		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 143		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 144		return;
 145	}
 146
 147	if (ring == &adev->uvd.ring_enc[0])
 148		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
 149			lower_32_bits(ring->wptr));
 150	else
 151		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
 152			lower_32_bits(ring->wptr));
 153}
 154
 155/**
 156 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 157 *
 158 * @ring: the engine to test on
 159 *
 160 */
 161static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 162{
 163	struct amdgpu_device *adev = ring->adev;
 164	uint32_t rptr = amdgpu_ring_get_rptr(ring);
 165	unsigned i;
 166	int r;
 167
 168	if (amdgpu_sriov_vf(adev))
 169		return 0;
 170
 171	r = amdgpu_ring_alloc(ring, 16);
 172	if (r) {
 173		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
 174			  ring->idx, r);
 175		return r;
 176	}
 177	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 178	amdgpu_ring_commit(ring);
 179
 180	for (i = 0; i < adev->usec_timeout; i++) {
 181		if (amdgpu_ring_get_rptr(ring) != rptr)
 182			break;
 183		DRM_UDELAY(1);
 184	}
 185
 186	if (i < adev->usec_timeout) {
 187		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 188			 ring->idx, i);
 189	} else {
 190		DRM_ERROR("amdgpu: ring %d test failed\n",
 191			  ring->idx);
 192		r = -ETIMEDOUT;
 193	}
 194
 195	return r;
 196}
 197
 198/**
 199 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 200 *
 202 * @ring: ring we should submit the msg to
 203 * @handle: session handle to use
 204 * @fence: optional fence to return
 205 *
 206 * Open up a stream for HW test
 207 */
 208static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 209				       struct dma_fence **fence)
 210{
 211	const unsigned ib_size_dw = 16;
 212	struct amdgpu_job *job;
 213	struct amdgpu_ib *ib;
 214	struct dma_fence *f = NULL;
 215	uint64_t dummy;
 216	int i, r;
 217
 218	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 219	if (r)
 220		return r;
 221
 222	ib = &job->ibs[0];
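    	/* presumably any valid GPU address suffices for this test message;
    	 * use a scratch location 1 KiB past the command words in the IB BO
    	 */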
 223	dummy = ib->gpu_addr + 1024;
 224
 225	ib->length_dw = 0;
 226	ib->ptr[ib->length_dw++] = 0x00000018;
 227	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 228	ib->ptr[ib->length_dw++] = handle;
 229	ib->ptr[ib->length_dw++] = 0x00000000;
 230	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
 231	ib->ptr[ib->length_dw++] = dummy;
 232
 233	ib->ptr[ib->length_dw++] = 0x00000014;
 234	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 235	ib->ptr[ib->length_dw++] = 0x0000001c;
 236	ib->ptr[ib->length_dw++] = 0x00000000;
 237	ib->ptr[ib->length_dw++] = 0x00000000;
 238
 239	ib->ptr[ib->length_dw++] = 0x00000008;
 240	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 241
 242	for (i = ib->length_dw; i < ib_size_dw; ++i)
 243		ib->ptr[i] = 0x0;
 244
 245	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 246	job->fence = dma_fence_get(f);
 247	if (r)
 248		goto err;
 249
 250	amdgpu_job_free(job);
 251	if (fence)
 252		*fence = dma_fence_get(f);
 253	dma_fence_put(f);
 254	return 0;
 255
 256err:
 257	amdgpu_job_free(job);
 258	return r;
 259}
 260
 261/**
 262 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 263 *
 264 * @ring: ring we should submit the msg to
 265 * @handle: session handle to use
 266 * @direct: submit the message directly to the ring, bypassing the scheduler
 267 * @fence: optional fence to return
 268 *
 269 * Close up a stream for HW test or if userspace failed to do so
 270 */
 271int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 272				 bool direct, struct dma_fence **fence)
 273{
 274	const unsigned ib_size_dw = 16;
 275	struct amdgpu_job *job;
 276	struct amdgpu_ib *ib;
 277	struct dma_fence *f = NULL;
 278	uint64_t dummy;
 279	int i, r;
 280
 281	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 282	if (r)
 283		return r;
 284
 285	ib = &job->ibs[0];
 286	dummy = ib->gpu_addr + 1024;
 287
 288	ib->length_dw = 0;
 289	ib->ptr[ib->length_dw++] = 0x00000018;
 290	ib->ptr[ib->length_dw++] = 0x00000001;
 291	ib->ptr[ib->length_dw++] = handle;
 292	ib->ptr[ib->length_dw++] = 0x00000000;
 293	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
 294	ib->ptr[ib->length_dw++] = dummy;
 295
 296	ib->ptr[ib->length_dw++] = 0x00000014;
 297	ib->ptr[ib->length_dw++] = 0x00000002;
 298	ib->ptr[ib->length_dw++] = 0x0000001c;
 299	ib->ptr[ib->length_dw++] = 0x00000000;
 300	ib->ptr[ib->length_dw++] = 0x00000000;
 301
 302	ib->ptr[ib->length_dw++] = 0x00000008;
 303	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 304
 305	for (i = ib->length_dw; i < ib_size_dw; ++i)
 306		ib->ptr[i] = 0x0;
 307
 308	if (direct) {
 309		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 310		job->fence = dma_fence_get(f);
 311		if (r)
 312			goto err;
 313
 314		amdgpu_job_free(job);
 315	} else {
 316		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
 317				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 318		if (r)
 319			goto err;
 320	}
 321
 322	if (fence)
 323		*fence = dma_fence_get(f);
 324	dma_fence_put(f);
 325	return 0;
 326
 327err:
 328	amdgpu_job_free(job);
 329	return r;
 330}
 331
 332/**
 333 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 334 *
 335 * @ring: the engine to test on
 336 *
 337 */
 338static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 339{
 340	struct dma_fence *fence = NULL;
 341	long r;
 342
 343	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
 344	if (r) {
 345		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
 346		goto error;
 347	}
 348
 349	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
 350	if (r) {
 351		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
 352		goto error;
 353	}
 354
 355	r = dma_fence_wait_timeout(fence, false, timeout);
 356	if (r == 0) {
 357		DRM_ERROR("amdgpu: IB test timed out.\n");
 358		r = -ETIMEDOUT;
 359	} else if (r < 0) {
 360		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 361	} else {
 362		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 363		r = 0;
 364	}
 365error:
 366	dma_fence_put(fence);
 367	return r;
 368}
 369
 370static int uvd_v7_0_early_init(void *handle)
 371{
 372	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 373
 374	if (amdgpu_sriov_vf(adev))
 375		adev->uvd.num_enc_rings = 1;
 376	else
 377		adev->uvd.num_enc_rings = 2;
 378	uvd_v7_0_set_ring_funcs(adev);
 379	uvd_v7_0_set_enc_ring_funcs(adev);
 380	uvd_v7_0_set_irq_funcs(adev);
 381
 382	return 0;
 383}
 384
 385static int uvd_v7_0_sw_init(void *handle)
 386{
 387	struct amdgpu_ring *ring;
 388	struct drm_sched_rq *rq;
 389	int i, r;
 390	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 391
 392	/* UVD TRAP */
 393	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
 394	if (r)
 395		return r;
 396
 397	/* UVD ENC TRAP */
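    	/* src ids 119 and 120 map to ENC rings 0 and 1, see uvd_v7_0_process_interrupt() */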
 398	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 399		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
 400		if (r)
 401			return r;
 402	}
 403
 404	r = amdgpu_uvd_sw_init(adev);
 405	if (r)
 406		return r;
 407
 408	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 409		const struct common_firmware_header *hdr;
 410		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 411		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
 412		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 413		adev->firmware.fw_size +=
 414			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 415		DRM_INFO("PSP loading UVD firmware\n");
 416	}
 417
 418	ring = &adev->uvd.ring_enc[0];
 419	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 420	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
 421				  rq, amdgpu_sched_jobs, NULL);
 422	if (r) {
 423		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 424		return r;
 425	}
 426
 427	r = amdgpu_uvd_resume(adev);
 428	if (r)
 429		return r;
 430	if (!amdgpu_sriov_vf(adev)) {
 431		ring = &adev->uvd.ring;
 432		sprintf(ring->name, "uvd");
 433		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 434		if (r)
 435			return r;
 436	}
 437
 438	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 439		ring = &adev->uvd.ring_enc[i];
 440		sprintf(ring->name, "uvd_enc%d", i);
 441		if (amdgpu_sriov_vf(adev)) {
 442			ring->use_doorbell = true;
 443
 444			/* currently only the first encoding ring is used for
 445			 * SRIOV, so park the unused rings at a spare doorbell location.
 446			 */
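    			/* AMDGPU_DOORBELL64_* values index 64-bit doorbell slots and
    			 * are doubled to get the 32-bit doorbell index the ring uses
    			 */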
 447			if (i == 0)
 448				ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
 449			else
 450				ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
 451		}
 452		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 453		if (r)
 454			return r;
 455	}
 456
 457	r = amdgpu_virt_alloc_mm_table(adev);
 458	if (r)
 459		return r;
 460
 461	return r;
 462}
 463
 464static int uvd_v7_0_sw_fini(void *handle)
 465{
 466	int i, r;
 467	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 468
 469	amdgpu_virt_free_mm_table(adev);
 470
 471	r = amdgpu_uvd_suspend(adev);
 472	if (r)
 473		return r;
 474
 475	drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
 476
 477	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 478		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
 479
 480	return amdgpu_uvd_sw_fini(adev);
 481}
 482
 483/**
 484 * uvd_v7_0_hw_init - start and test UVD block
 485 *
 486 * @adev: amdgpu_device pointer
 487 *
 488 * Initialize the hardware, boot up the VCPU and do some testing
 489 */
 490static int uvd_v7_0_hw_init(void *handle)
 491{
 492	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 493	struct amdgpu_ring *ring = &adev->uvd.ring;
 494	uint32_t tmp;
 495	int i, r;
 496
 497	if (amdgpu_sriov_vf(adev))
 498		r = uvd_v7_0_sriov_start(adev);
 499	else
 500		r = uvd_v7_0_start(adev);
 501	if (r)
 502		goto done;
 503
 504	if (!amdgpu_sriov_vf(adev)) {
 505		ring->ready = true;
 506		r = amdgpu_ring_test_ring(ring);
 507		if (r) {
 508			ring->ready = false;
 509			goto done;
 510		}
 511
 512		r = amdgpu_ring_alloc(ring, 10);
 513		if (r) {
 514			DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
 515			goto done;
 516		}
 517
 518		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
 519			mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
 520		amdgpu_ring_write(ring, tmp);
 521		amdgpu_ring_write(ring, 0xFFFFF);
 522
 523		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
 524			mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
 525		amdgpu_ring_write(ring, tmp);
 526		amdgpu_ring_write(ring, 0xFFFFF);
 527
 528		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
 529			mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
 530		amdgpu_ring_write(ring, tmp);
 531		amdgpu_ring_write(ring, 0xFFFFF);
 532
 533		/* Clear timeout status bits */
 534		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
 535			mmUVD_SEMA_TIMEOUT_STATUS), 0));
 536		amdgpu_ring_write(ring, 0x8);
 537
 538		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
 539			mmUVD_SEMA_CNTL), 0));
 540		amdgpu_ring_write(ring, 3);
 541
 542		amdgpu_ring_commit(ring);
 543	}
 544
 545	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 546		ring = &adev->uvd.ring_enc[i];
 547		ring->ready = true;
 548		r = amdgpu_ring_test_ring(ring);
 549		if (r) {
 550			ring->ready = false;
 551			goto done;
 552		}
 553	}
 554
 555done:
 556	if (!r)
 557		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
 558
 559	return r;
 560}
 561
 562/**
 563 * uvd_v7_0_hw_fini - stop the hardware block
 564 *
 565 * @adev: amdgpu_device pointer
 566 *
 567 * Stop the UVD block, mark ring as not ready any more
 568 */
 569static int uvd_v7_0_hw_fini(void *handle)
 570{
 571	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 572	struct amdgpu_ring *ring = &adev->uvd.ring;
 573
 574	if (!amdgpu_sriov_vf(adev))
 575		uvd_v7_0_stop(adev);
 576	else {
 577		/* full access mode, so don't touch any UVD register */
 578		DRM_DEBUG("SRIOV client: skipping UVD register access\n");
 579	}
 580
 581	ring->ready = false;
 582
 583	return 0;
 584}
 585
 586static int uvd_v7_0_suspend(void *handle)
 587{
 588	int r;
 589	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 590
 591	r = uvd_v7_0_hw_fini(adev);
 592	if (r)
 593		return r;
 594
 595	return amdgpu_uvd_suspend(adev);
 596}
 597
 598static int uvd_v7_0_resume(void *handle)
 599{
 600	int r;
 601	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 602
 603	r = amdgpu_uvd_resume(adev);
 604	if (r)
 605		return r;
 606
 607	return uvd_v7_0_hw_init(adev);
 608}
 609
 610/**
 611 * uvd_v7_0_mc_resume - memory controller programming
 612 *
 613 * @adev: amdgpu_device pointer
 614 *
 615 * Let the UVD memory controller know its offsets
 616 */
 617static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 618{
 619	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 620	uint32_t offset;
 621
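    	/* offset: start of the heap/stack region inside the UVD BO; zero when
    	 * PSP has already placed the firmware at its own location
    	 */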
 622	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 623		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 624			lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
 625		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 626			upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
 627		offset = 0;
 628	} else {
 629		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 630			lower_32_bits(adev->uvd.gpu_addr));
 631		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 632			upper_32_bits(adev->uvd.gpu_addr));
 633		offset = size;
 634	}
 635
 636	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 637				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 638	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 639
 640	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 641			lower_32_bits(adev->uvd.gpu_addr + offset));
 642	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 643			upper_32_bits(adev->uvd.gpu_addr + offset));
 644	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 645	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 646
 647	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 648			lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 649	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 650			upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 651	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 652	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
 653			AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 654
 655	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
 656			adev->gfx.config.gb_addr_config);
 657	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
 658			adev->gfx.config.gb_addr_config);
 659	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
 660			adev->gfx.config.gb_addr_config);
 661
 662	WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 663}
 664
 665static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 666				struct amdgpu_mm_table *table)
 667{
 668	uint32_t data = 0, loop;
 669	uint64_t addr = table->gpu_addr;
 670	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 671	uint32_t size;
 672
 673	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 674
 675	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
 676	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
 677	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 678
 679	/* 2, update vmid of descriptor */
 680	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 681	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 682	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
 683	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 684
 685	/* 3, notify mmsch about the size of this descriptor */
 686	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 687
 688	/* 4, set resp to zero */
 689	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 690
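    	/* reset ENC ring 0 doorbell and write pointers before the MMSCH
    	 * reprograms the rings from the descriptor table
    	 */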
 691	WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
 692	adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
 693	adev->uvd.ring_enc[0].wptr = 0;
 694	adev->uvd.ring_enc[0].wptr_old = 0;
 695
 696	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP reads back 0x10000002 */
 697	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 698
 699	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 700	loop = 1000;
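    	/* poll for the ack for up to ~10 ms (1000 iterations * 10 us) */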
 701	while ((data & 0x10000002) != 0x10000002) {
 702		udelay(10);
 703		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 704		loop--;
 705		if (!loop)
 706			break;
 707	}
 708
 709	if (!loop) {
 710		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 711		return -EBUSY;
 712	}
 713
 714	return 0;
 715}
 716
 717static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 718{
 719	struct amdgpu_ring *ring;
 720	uint32_t offset, size, tmp;
 721	uint32_t table_size = 0;
 722	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
 723	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
 724	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
 725	struct mmsch_v1_0_cmd_end end = { {0} };
 726	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 727	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
 728
 729	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 730	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
 731	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
 732	end.cmd_header.command_type = MMSCH_COMMAND__END;
 733
 734	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
 735		header->version = MMSCH_VERSION;
 736		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
 737
 738		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
 739			header->uvd_table_offset = header->header_size;
 740		else
 741			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
 742
 743		init_table += header->uvd_table_offset;
 744
 745		ring = &adev->uvd.ring;
 746		ring->wptr = 0;
 747		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 748
 749		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
 750						   0xFFFFFFFF, 0x00000004);
 751		/* mc resume */
 752		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 753			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 754						    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
 755			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 756						    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
 757			offset = 0;
 758		} else {
 759			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 760						    lower_32_bits(adev->uvd.gpu_addr));
 761			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 762						    upper_32_bits(adev->uvd.gpu_addr));
 763			offset = size;
 764		}
 765
 766		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 767					    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 768		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
 769
 770		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 771					    lower_32_bits(adev->uvd.gpu_addr + offset));
 772		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 773					    upper_32_bits(adev->uvd.gpu_addr + offset));
 774		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 775		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 776
 777		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 778					    lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 779		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 780					    upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 781		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 782		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
 783					    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 784
 785		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
 786		/* mc resume end */
 787
 788		/* disable clock gating */
 789		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
 790						   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
 791
 792		/* disable interrupt */
 793		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
 794						   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 795
 796		/* stall UMC and register bus before resetting VCPU */
 797		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
 798						   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 799						   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 800
 801		/* put LMI, VCPU, RBC etc... into reset */
 802		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
 803					    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 804						       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 805						       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 806						       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 807						       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 808						       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 809						       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 810						       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
 811
 812		/* initialize UVD memory controller */
 813		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
 814					    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 815						       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 816						       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 817						       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 818						       UVD_LMI_CTRL__REQ_MODE_MASK |
 819						       0x00100000L));
 820
 821		/* take all subblocks out of reset, except VCPU */
 822		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
 823					    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 824
 825		/* enable VCPU clock */
 826		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
 827					    UVD_VCPU_CNTL__CLK_EN_MASK);
 828
 829		/* enable master interrupt */
 830		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
 831						   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 832						   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 833
 834		/* clear the bit 4 of UVD_STATUS */
 835		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
 836						   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
 837
 838		/* force RBC into idle state */
 839		size = order_base_2(ring->ring_size);
 840		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
 841		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 842		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
 843
 844		ring = &adev->uvd.ring_enc[0];
 845		ring->wptr = 0;
 846		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
 847		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
 848		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
 849
 850		/* boot up the VCPU */
 851		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
 852
 853		/* enable UMC */
 854		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
 855						   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
 856
 857		MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
 858
 859		/* add end packet */
 860		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 861		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 862		header->uvd_table_size = table_size;
 863
 864	}
 865	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
 866}
 867
 868/**
 869 * uvd_v7_0_start - start UVD block
 870 *
 871 * @adev: amdgpu_device pointer
 872 *
 873 * Setup and start the UVD block
 874 */
 875static int uvd_v7_0_start(struct amdgpu_device *adev)
 876{
 877	struct amdgpu_ring *ring = &adev->uvd.ring;
 878	uint32_t rb_bufsz, tmp;
 879	uint32_t lmi_swap_cntl;
 880	uint32_t mp_swap_cntl;
 881	int i, j, r;
 882
 883	/* disable DPG */
 884	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
 885			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 886
 887	/* disable byte swapping */
 888	lmi_swap_cntl = 0;
 889	mp_swap_cntl = 0;
 890
 891	uvd_v7_0_mc_resume(adev);
 892
 893	/* disable clock gating */
 894	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
 895			~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 896
 897	/* disable interrupt */
 898	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
 899			~UVD_MASTINT_EN__VCPU_EN_MASK);
 900
 901	/* stall UMC and register bus before resetting VCPU */
 902	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
 903			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 904			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 905	mdelay(1);
 906
 907	/* put LMI, VCPU, RBC etc... into reset */
 908	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
 909		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 910		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 911		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 912		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 913		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 914		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 915		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 916		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
 917	mdelay(5);
 918
 919	/* initialize UVD memory controller */
 920	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
 921		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 922		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 923		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 924		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 925		UVD_LMI_CTRL__REQ_MODE_MASK |
 926		0x00100000L);
 927
 928#ifdef __BIG_ENDIAN
 929	/* swap (8 in 32) RB and IB */
 930	lmi_swap_cntl = 0xa;
 931	mp_swap_cntl = 0;
 932#endif
 933	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 934	WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 935
 936	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
 937	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
 938	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
 939	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
 940	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
 941	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
 942
 943	/* take all subblocks out of reset, except VCPU */
 944	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
 945			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 946	mdelay(5);
 947
 948	/* enable VCPU clock */
 949	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
 950			UVD_VCPU_CNTL__CLK_EN_MASK);
 951
 952	/* enable UMC */
 953	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
 954			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 955
 956	/* boot up the VCPU */
 957	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
 958	mdelay(10);
 959
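    	/* up to 10 boot attempts; each one polls UVD_STATUS for ~1 s
    	 * (100 * 10 ms) before soft-resetting the VCPU and retrying
    	 */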
 960	for (i = 0; i < 10; ++i) {
 961		uint32_t status;
 962
 963		for (j = 0; j < 100; ++j) {
 964			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
 965			if (status & 2)
 966				break;
 967			mdelay(10);
 968		}
 969		r = 0;
 970		if (status & 2)
 971			break;
 972
 973		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
 974		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
 975				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
 976				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 977		mdelay(10);
 978		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
 979				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 980		mdelay(10);
 981		r = -1;
 982	}
 983
 984	if (r) {
 985		DRM_ERROR("UVD not responding, giving up!!!\n");
 986		return r;
 987	}
 988	/* enable master interrupt */
 989	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
 990		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 991		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 992
 993	/* clear the bit 4 of UVD_STATUS */
 994	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
 995			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 996
 997	/* force RBC into idle state */
 998	rb_bufsz = order_base_2(ring->ring_size);
 999	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1000	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1001	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1002	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1003	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1004	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1005	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1006
1007	/* set the write pointer delay */
1008	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1009
1010	/* set the wb address */
1011	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1012			(upper_32_bits(ring->gpu_addr) >> 2));
1013
1014	/* program the RB_BASE for the ring buffer */
1015	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1016			lower_32_bits(ring->gpu_addr));
1017	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1018			upper_32_bits(ring->gpu_addr));
1019
1020	/* Initialize the ring buffer's read and write pointers */
1021	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1022
1023	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1024	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1025			lower_32_bits(ring->wptr));
1026
1027	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1028			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1029
1030	ring = &adev->uvd.ring_enc[0];
1031	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1032	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1033	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1034	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1035	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1036
1037	ring = &adev->uvd.ring_enc[1];
1038	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1039	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1040	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1041	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1042	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1043
1044	return 0;
1045}
1046
1047/**
1048 * uvd_v7_0_stop - stop UVD block
1049 *
1050 * @adev: amdgpu_device pointer
1051 *
1052 * stop the UVD block
1053 */
1054static void uvd_v7_0_stop(struct amdgpu_device *adev)
1055{
1056	/* force RBC into idle state */
1057	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
1058
1059	/* Stall UMC and register bus before resetting VCPU */
1060	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
1061			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1062			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1063	mdelay(1);
1064
1065	/* put VCPU into reset */
1066	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
1067			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1068	mdelay(5);
1069
1070	/* disable VCPU clock */
1071	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
1072
1073	/* Unstall UMC and register bus */
1074	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
1075			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1076}
1077
1078/**
1079 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1080 *
1081 * @ring: amdgpu_ring pointer
1082 * @addr: GPU address for the fence, @seq: sequence value, @flags: fence flags
1083 *
1084 * Write a fence and a trap command to the ring.
1085 */
1086static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1087				     unsigned flags)
1088{
1089	struct amdgpu_device *adev = ring->adev;
1090
1091	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1092
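    	/* GPCOM command 0 stores the fence value (latched via CONTEXT_ID) at
    	 * the address in DATA0/DATA1; command 2 then raises the trap
    	 * interrupt, the same convention as on earlier UVD generations
    	 */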
1093	amdgpu_ring_write(ring,
1094		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1095	amdgpu_ring_write(ring, seq);
1096	amdgpu_ring_write(ring,
1097		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1098	amdgpu_ring_write(ring, addr & 0xffffffff);
1099	amdgpu_ring_write(ring,
1100		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1101	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1102	amdgpu_ring_write(ring,
1103		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1104	amdgpu_ring_write(ring, 0);
1105
1106	amdgpu_ring_write(ring,
1107		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1108	amdgpu_ring_write(ring, 0);
1109	amdgpu_ring_write(ring,
1110		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1111	amdgpu_ring_write(ring, 0);
1112	amdgpu_ring_write(ring,
1113		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1114	amdgpu_ring_write(ring, 2);
1115}
1116
1117/**
1118 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1119 *
1120 * @ring: amdgpu_ring pointer
1121 * @addr: GPU address for the fence, @seq: sequence value, @flags: fence flags
1122 *
1123 * Write an enc fence and a trap command to the ring.
1124 */
1125static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1126			u64 seq, unsigned flags)
1127{
1128
1129	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1130
1131	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1132	amdgpu_ring_write(ring, addr);
1133	amdgpu_ring_write(ring, upper_32_bits(addr));
1134	amdgpu_ring_write(ring, seq);
1135	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1136}
1137
1138/**
1139 * uvd_v7_0_ring_test_ring - register write test
1140 *
1141 * @ring: amdgpu_ring pointer
1142 *
1143 * Test if we can successfully write to the context register
1144 */
1145static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1146{
1147	struct amdgpu_device *adev = ring->adev;
1148	uint32_t tmp = 0;
1149	unsigned i;
1150	int r;
1151
1152	WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1153	r = amdgpu_ring_alloc(ring, 3);
1154	if (r) {
1155		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
1156			  ring->idx, r);
1157		return r;
1158	}
1159	amdgpu_ring_write(ring,
1160		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1161	amdgpu_ring_write(ring, 0xDEADBEEF);
1162	amdgpu_ring_commit(ring);
1163	for (i = 0; i < adev->usec_timeout; i++) {
1164		tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID);
1165		if (tmp == 0xDEADBEEF)
1166			break;
1167		DRM_UDELAY(1);
1168	}
1169
1170	if (i < adev->usec_timeout) {
1171		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
1172			 ring->idx, i);
1173	} else {
1174		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
1175			  ring->idx, tmp);
1176		r = -EINVAL;
1177	}
1178	return r;
1179}
1180
1181/**
1182 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1183 *
1184 * @ring: amdgpu_ring pointer
1185 * @ib: indirect buffer to execute
1186 *
1187 * Write ring commands to execute the indirect buffer
1188 */
1189static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1190				  struct amdgpu_ib *ib,
1191				  unsigned vmid, bool ctx_switch)
1192{
1193	struct amdgpu_device *adev = ring->adev;
1194
1195	amdgpu_ring_write(ring,
1196		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1197	amdgpu_ring_write(ring, vmid);
1198
1199	amdgpu_ring_write(ring,
1200		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1201	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1202	amdgpu_ring_write(ring,
1203		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1204	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1205	amdgpu_ring_write(ring,
1206		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1207	amdgpu_ring_write(ring, ib->length_dw);
1208}
1209
1210/**
1211 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1212 *
1213 * @ring: amdgpu_ring pointer
1214 * @ib: indirect buffer to execute
1215 *
1216 * Write enc ring commands to execute the indirect buffer
1217 */
1218static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1219		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
1220{
1221	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1222	amdgpu_ring_write(ring, vmid);
1223	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1224	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1225	amdgpu_ring_write(ring, ib->length_dw);
1226}
1227
1228static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1229				    uint32_t reg, uint32_t val)
1230{
1231	struct amdgpu_device *adev = ring->adev;
1232
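    	/* GPCOM command 8: write DATA1 to the register index held in DATA0 */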
1233	amdgpu_ring_write(ring,
1234		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1235	amdgpu_ring_write(ring, reg << 2);
1236	amdgpu_ring_write(ring,
1237		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1238	amdgpu_ring_write(ring, val);
1239	amdgpu_ring_write(ring,
1240		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1241	amdgpu_ring_write(ring, 8);
1242}
1243
1244static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1245					uint32_t val, uint32_t mask)
1246{
1247	struct amdgpu_device *adev = ring->adev;
1248
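    	/* GPCOM command 12: poll the register in DATA0 until its value,
    	 * masked by GP_SCRATCH8, equals DATA1
    	 */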
1249	amdgpu_ring_write(ring,
1250		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1251	amdgpu_ring_write(ring, reg << 2);
1252	amdgpu_ring_write(ring,
1253		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1254	amdgpu_ring_write(ring, val);
1255	amdgpu_ring_write(ring,
1256		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1257	amdgpu_ring_write(ring, mask);
1258	amdgpu_ring_write(ring,
1259		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1260	amdgpu_ring_write(ring, 12);
1261}
1262
1263static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1264					unsigned vmid, uint64_t pd_addr)
1265{
1266	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1267	uint32_t data0, data1, mask;
1268
1269	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1270
1271	/* wait for reg writes */
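    	/* each VMID owns a lo/hi pair of page-table base registers, hence the
    	 * stride of two registers per VMID
    	 */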
1272	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1273	data1 = lower_32_bits(pd_addr);
1274	mask = 0xffffffff;
1275	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1276}
1277
1278static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1279{
1280	int i;
1281	struct amdgpu_device *adev = ring->adev;
1282
1283	for (i = 0; i < count; i++)
1284		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1285
1286}
1287
1288static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1289{
1290	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1291}
1292
1293static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1294					    uint32_t reg, uint32_t val,
1295					    uint32_t mask)
1296{
1297	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1298	amdgpu_ring_write(ring,	reg << 2);
1299	amdgpu_ring_write(ring, mask);
1300	amdgpu_ring_write(ring, val);
1301}
1302
1303static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1304					    unsigned int vmid, uint64_t pd_addr)
1305{
1306	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1307
1308	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1309
1310	/* wait for reg writes */
1311	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1312					lower_32_bits(pd_addr), 0xffffffff);
1313}
1314
1315static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1316					uint32_t reg, uint32_t val)
1317{
1318	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1319	amdgpu_ring_write(ring,	reg << 2);
1320	amdgpu_ring_write(ring, val);
1321}
1322
1323#if 0
1324static bool uvd_v7_0_is_idle(void *handle)
1325{
1326	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1327
1328	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1329}
1330
1331static int uvd_v7_0_wait_for_idle(void *handle)
1332{
1333	unsigned i;
1334	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1335
1336	for (i = 0; i < adev->usec_timeout; i++) {
1337		if (uvd_v7_0_is_idle(handle))
1338			return 0;
1339	}
1340	return -ETIMEDOUT;
1341}
1342
1343#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1344static bool uvd_v7_0_check_soft_reset(void *handle)
1345{
1346	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1347	u32 srbm_soft_reset = 0;
1348	u32 tmp = RREG32(mmSRBM_STATUS);
1349
1350	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1351	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1352	    (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
1353		    AMDGPU_UVD_STATUS_BUSY_MASK))
1354		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1355				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1356
1357	if (srbm_soft_reset) {
1358		adev->uvd.srbm_soft_reset = srbm_soft_reset;
1359		return true;
1360	} else {
1361		adev->uvd.srbm_soft_reset = 0;
1362		return false;
1363	}
1364}
1365
1366static int uvd_v7_0_pre_soft_reset(void *handle)
1367{
1368	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1369
1370	if (!adev->uvd.srbm_soft_reset)
1371		return 0;
1372
1373	uvd_v7_0_stop(adev);
1374	return 0;
1375}
1376
1377static int uvd_v7_0_soft_reset(void *handle)
1378{
1379	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1380	u32 srbm_soft_reset;
1381
1382	if (!adev->uvd.srbm_soft_reset)
1383		return 0;
1384	srbm_soft_reset = adev->uvd.srbm_soft_reset;
1385
1386	if (srbm_soft_reset) {
1387		u32 tmp;
1388
1389		tmp = RREG32(mmSRBM_SOFT_RESET);
1390		tmp |= srbm_soft_reset;
1391		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1392		WREG32(mmSRBM_SOFT_RESET, tmp);
1393		tmp = RREG32(mmSRBM_SOFT_RESET);
1394
1395		udelay(50);
1396
1397		tmp &= ~srbm_soft_reset;
1398		WREG32(mmSRBM_SOFT_RESET, tmp);
1399		tmp = RREG32(mmSRBM_SOFT_RESET);
1400
1401		/* Wait a little for things to settle down */
1402		udelay(50);
1403	}
1404
1405	return 0;
1406}
1407
1408static int uvd_v7_0_post_soft_reset(void *handle)
1409{
1410	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1411
1412	if (!adev->uvd.srbm_soft_reset)
1413		return 0;
1414
1415	mdelay(5);
1416
1417	return uvd_v7_0_start(adev);
1418}
1419#endif
1420
1421static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1422					struct amdgpu_irq_src *source,
1423					unsigned type,
1424					enum amdgpu_interrupt_state state)
1425{
1426	/* TODO */
1427	return 0;
1428}
1429
1430static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1431				      struct amdgpu_irq_src *source,
1432				      struct amdgpu_iv_entry *entry)
1433{
1434	DRM_DEBUG("IH: UVD TRAP\n");
1435	switch (entry->src_id) {
1436	case 124:
1437		amdgpu_fence_process(&adev->uvd.ring);
1438		break;
1439	case 119:
1440		amdgpu_fence_process(&adev->uvd.ring_enc[0]);
1441		break;
1442	case 120:
1443		if (!amdgpu_sriov_vf(adev))
1444			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
1445		break;
1446	default:
1447		DRM_ERROR("Unhandled interrupt: %d %d\n",
1448			  entry->src_id, entry->src_data[0]);
1449		break;
1450	}
1451
1452	return 0;
1453}
1454
1455#if 0
1456static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1457{
1458	uint32_t data, data1, data2, suvd_flags;
1459
1460	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
1461	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1462	data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
1463
1464	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1465		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1466
1467	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1468		     UVD_SUVD_CGC_GATE__SIT_MASK |
1469		     UVD_SUVD_CGC_GATE__SMP_MASK |
1470		     UVD_SUVD_CGC_GATE__SCM_MASK |
1471		     UVD_SUVD_CGC_GATE__SDB_MASK;
1472
1473	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1474		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1475		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1476
1477	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1478			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1479			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1480			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1481			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1482			UVD_CGC_CTRL__SYS_MODE_MASK |
1483			UVD_CGC_CTRL__UDEC_MODE_MASK |
1484			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1485			UVD_CGC_CTRL__REGS_MODE_MASK |
1486			UVD_CGC_CTRL__RBC_MODE_MASK |
1487			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1488			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1489			UVD_CGC_CTRL__IDCT_MODE_MASK |
1490			UVD_CGC_CTRL__MPRD_MODE_MASK |
1491			UVD_CGC_CTRL__MPC_MODE_MASK |
1492			UVD_CGC_CTRL__LBSI_MODE_MASK |
1493			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1494			UVD_CGC_CTRL__WCB_MODE_MASK |
1495			UVD_CGC_CTRL__VCPU_MODE_MASK |
1496			UVD_CGC_CTRL__JPEG_MODE_MASK |
1497			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1498			UVD_CGC_CTRL__SCPU_MODE_MASK);
1499	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1500			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1501			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1502			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1503			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1504	data1 |= suvd_flags;
1505
1506	WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
1507	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
1508	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1509	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
1510}
1511
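/**
 * uvd_v7_0_set_hw_clock_gating - enable HW controlled clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Turn on the hardware clock gates for the UVD and SUVD sub-blocks.
 */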
1512static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1513{
1514	uint32_t data, data1, cgc_flags, suvd_flags;
1515
1516	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
1517	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
1518
1519	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1520		UVD_CGC_GATE__UDEC_MASK |
1521		UVD_CGC_GATE__MPEG2_MASK |
1522		UVD_CGC_GATE__RBC_MASK |
1523		UVD_CGC_GATE__LMI_MC_MASK |
1524		UVD_CGC_GATE__IDCT_MASK |
1525		UVD_CGC_GATE__MPRD_MASK |
1526		UVD_CGC_GATE__MPC_MASK |
1527		UVD_CGC_GATE__LBSI_MASK |
1528		UVD_CGC_GATE__LRBBM_MASK |
1529		UVD_CGC_GATE__UDEC_RE_MASK |
1530		UVD_CGC_GATE__UDEC_CM_MASK |
1531		UVD_CGC_GATE__UDEC_IT_MASK |
1532		UVD_CGC_GATE__UDEC_DB_MASK |
1533		UVD_CGC_GATE__UDEC_MP_MASK |
1534		UVD_CGC_GATE__WCB_MASK |
1535		UVD_CGC_GATE__VCPU_MASK |
1536		UVD_CGC_GATE__SCPU_MASK |
1537		UVD_CGC_GATE__JPEG_MASK |
1538		UVD_CGC_GATE__JPEG2_MASK;
1539
1540	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1541				UVD_SUVD_CGC_GATE__SIT_MASK |
1542				UVD_SUVD_CGC_GATE__SMP_MASK |
1543				UVD_SUVD_CGC_GATE__SCM_MASK |
1544				UVD_SUVD_CGC_GATE__SDB_MASK;
1545
1546	data |= cgc_flags;
1547	data1 |= suvd_flags;
1548
1549	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1550	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1551}
1552
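/**
 * uvd_v7_0_set_bypass_mode - toggle DCLK/VCLK DFS bypass
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable bypass
 *
 * Set or clear the GCK DFS bypass bits for the UVD decode
 * and video clocks.
 */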
1553static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1554{
1555	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1556
1557	if (enable)
1558		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1559			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1560	else
1561		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1562			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1563
1564	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1565}
1566
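/**
 * uvd_v7_0_set_clockgating_state - enable/disable UVD clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: requested clock gating state
 *
 * Switch to SW gating when gating is enabled; wait for idle and
 * (eventually) re-enable HW gating when it is disabled.
 */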
1568static int uvd_v7_0_set_clockgating_state(void *handle,
1569					  enum amd_clockgating_state state)
1570{
1571	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1572	bool enable = (state == AMD_CG_STATE_GATE);
1573
1574	uvd_v7_0_set_bypass_mode(adev, enable);
1575
1576	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1577		return 0;
1578
1579	if (enable) {
1580		/* disable HW gating and enable SW gating */
1581		uvd_v7_0_set_sw_clock_gating(adev);
1582	} else {
1583		/* wait for STATUS to clear */
1584		if (uvd_v7_0_wait_for_idle(handle))
1585			return -EBUSY;
1586
1587		/* enable HW gates because UVD is idle */
1588		/* uvd_v7_0_set_hw_clock_gating(adev); */
1589	}
1590
1591	return 0;
1592}
1593
1594static int uvd_v7_0_set_powergating_state(void *handle,
1595					  enum amd_powergating_state state)
1596{
1597	/* This doesn't actually powergate the UVD block.
1598	 * That's done in the dpm code via the SMC.  This
1599	 * just re-inits the block as necessary.  The actual
1600	 * gating still happens in the dpm code.  We should
1601	 * revisit this when there is a cleaner line between
1602	 * the SMC and the HW blocks.
1603	 */
1604	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1607		return 0;
1608
1609	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1610
1611	if (state == AMD_PG_STATE_GATE) {
1612		uvd_v7_0_stop(adev);
1613		return 0;
1614	}

1615	return uvd_v7_0_start(adev);
1617}
1618#endif
1619
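/**
 * uvd_v7_0_set_clockgating_state - stub clock gating handler
 *
 * @handle: amdgpu_device pointer
 * @state: requested clock gating state
 *
 * Always reports success so the IP block can be torn down
 * cleanly on driver unload.
 */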
1620static int uvd_v7_0_set_clockgating_state(void *handle,
1621					  enum amd_clockgating_state state)
1622{
1623	/* needed for driver unload */
1624	return 0;
1625}
1626
1627const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1628	.name = "uvd_v7_0",
1629	.early_init = uvd_v7_0_early_init,
1630	.late_init = NULL,
1631	.sw_init = uvd_v7_0_sw_init,
1632	.sw_fini = uvd_v7_0_sw_fini,
1633	.hw_init = uvd_v7_0_hw_init,
1634	.hw_fini = uvd_v7_0_hw_fini,
1635	.suspend = uvd_v7_0_suspend,
1636	.resume = uvd_v7_0_resume,
1637	.is_idle = NULL /* uvd_v7_0_is_idle */,
1638	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1639	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1640	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1641	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1642	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1643	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1644	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1645};
1646
1647static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1648	.type = AMDGPU_RING_TYPE_UVD,
1649	.align_mask = 0xf,
1650	.nop = PACKET0(0x81ff, 0),
1651	.support_64bit_ptrs = false,
1652	.vmhub = AMDGPU_MMHUB,
1653	.get_rptr = uvd_v7_0_ring_get_rptr,
1654	.get_wptr = uvd_v7_0_ring_get_wptr,
1655	.set_wptr = uvd_v7_0_ring_set_wptr,
1656	.emit_frame_size =
1657		6 + 6 + /* hdp flush / invalidate */
1658		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1659		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1660		8 + /* uvd_v7_0_ring_emit_vm_flush */
1661		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1662	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1663	.emit_ib = uvd_v7_0_ring_emit_ib,
1664	.emit_fence = uvd_v7_0_ring_emit_fence,
1665	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1666	.test_ring = uvd_v7_0_ring_test_ring,
1667	.test_ib = amdgpu_uvd_ring_test_ib,
1668	.insert_nop = uvd_v7_0_ring_insert_nop,
1669	.pad_ib = amdgpu_ring_generic_pad_ib,
1670	.begin_use = amdgpu_uvd_ring_begin_use,
1671	.end_use = amdgpu_uvd_ring_end_use,
1672	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1673	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1674};
1675
1676static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1677	.type = AMDGPU_RING_TYPE_UVD_ENC,
1678	.align_mask = 0x3f,
1679	.nop = HEVC_ENC_CMD_NO_OP,
1680	.support_64bit_ptrs = false,
1681	.vmhub = AMDGPU_MMHUB,
1682	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1683	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1684	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1685	.emit_frame_size =
1686		3 + 3 + /* hdp flush / invalidate */
1687		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1688		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1689		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1690		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1691		1, /* uvd_v7_0_enc_ring_insert_end */
1692	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1693	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1694	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1695	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1696	.test_ring = uvd_v7_0_enc_ring_test_ring,
1697	.test_ib = uvd_v7_0_enc_ring_test_ib,
1698	.insert_nop = amdgpu_ring_insert_nop,
1699	.insert_end = uvd_v7_0_enc_ring_insert_end,
1700	.pad_ib = amdgpu_ring_generic_pad_ib,
1701	.begin_use = amdgpu_uvd_ring_begin_use,
1702	.end_use = amdgpu_uvd_ring_end_use,
1703	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1704	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1705};
1706
1707	static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1708	{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
1709		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
1710		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
1711}
1712
1713	static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1714	{
1715	int i, j;
1716
	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
1717		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1718			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}
1719
1720		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
1721}
1722
1723static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1724	.set = uvd_v7_0_set_interrupt_state,
1725	.process = uvd_v7_0_process_interrupt,
1726};
1727
1728static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1729{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
1730		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1731		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
1732}
1733
1734	const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
1736	.type = AMD_IP_BLOCK_TYPE_UVD,
1737	.major = 7,
1738	.minor = 0,
1739	.rev = 0,
1740	.funcs = &uvd_v7_0_ip_funcs,
1741};