/* Linux v4.10.11 */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
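
/*
 * UVD 6.0 is the Unified Video Decoder block on VI-family ASICs (Tonga,
 * Fiji, Polaris and friends).  The amdgpu IP framework drives the
 * amd_ip_funcs table at the bottom of this file in a fixed order:
 * early_init -> sw_init -> hw_init on load, hw_fini -> sw_fini on unload,
 * with suspend/resume and the soft-reset hooks in between.
 */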

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}
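
/*
 * IRQ source id 124 below is the UVD "system message" trap interrupt on
 * VI parts; later kernels spell it VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE
 * (see the v5.14.15 copy of this file further down).
 */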
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v6_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}
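
	/*
	 * UVD decode-ring commands are plain type-0 register writes: each
	 * PACKET0(reg, 0) header is followed by one dword that the VCPU
	 * writes to <reg>.  The writes below program generous timeouts for
	 * the semaphore engine and then enable it (writing 3 sets the two
	 * low control bits of UVD_SEMA_CNTL).
	 */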
	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v6_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_suspend(adev);
		if (r)
			return r;
	}

	return r;
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}
	r = uvd_v6_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
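
/*
 * The three VCPU_CACHE windows above carve the UVD BO into firmware image,
 * heap, and per-session stack regions, in that order.  The OFFSETn
 * registers are shifted right by 3, which suggests they are expressed in
 * 8-byte units; the SIZEn registers take plain byte counts.
 */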

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);
	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
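
	/*
	 * Firmware handshake: poll UVD_STATUS for the bit the firmware sets
	 * once the VCPU is up (bit 1) for up to ~1s, and if it never comes
	 * up, pulse VCPU_SOFT_RESET and try again, up to 10 attempts total.
	 */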
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
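
/*
 * GPCOM commands hand a DATA0/DATA1 pair plus a command code to the VCPU:
 * above, command 0 appears to be "write the fence value" and command 2 the
 * trap that raises the UVD interrupt; the register-write (0x8) and
 * register-wait (0xC/0xE) codes used by the VM-flush and pipeline-sync
 * helpers below follow the same pattern.
 */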

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: VM id to execute the IB under
 * @ctx_switch: unused by this ring type
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
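
/*
 * uvd_v6_0_ring_emit_vm_flush - flush the VM TLB from the decode ring.
 *
 * VM contexts 0-7 and 8-15 keep their page-directory base addresses in two
 * separate register banks, hence the vm_id < 8 split below.  The sequence
 * is: write the new PD address, request a TLB invalidate for this VM id,
 * then wait (command 0xC, with the mask in GP_SCRATCH8) until the
 * invalidate-request bit for this vm_id reads back as zero.
 */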
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}
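
/*
 * GPU-reset flow: check_soft_reset above decides whether UVD needs a reset
 * and caches the SRBM bits; pre_soft_reset quiesces the block, soft_reset
 * pulses SRBM_SOFT_RESET (reading the register back to post each write),
 * and post_soft_reset restarts the VCPU.
 */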
static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK |
		     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK       |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__LMI_UMC_MASK   |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK      |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
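
/*
 * In the CGC_CTRL registers a cleared *_MODE bit selects hardware-
 * controlled (dynamic) gating for that sub-block, so the function above
 * effectively turns on dynamic clocking everywhere and only keeps the
 * delay/timer fields programmed.  At least, that is the convention the
 * rest of the amdgpu CG code appears to follow.
 */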

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
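
/*
 * ixUVD_CGC_MEM_CTRL is an indirect register in the UVD context space,
 * which is why it goes through RREG32_UVD_CTX/WREG32_UVD_CTX (an
 * index/data pair) rather than plain MMIO; the low 12 bits appear to be
 * per-memory-array gate enables.
 */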

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
	} else {
		return uvd_v6_0_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
};
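
/*
 * emit_frame_size below is the worst-case dword count per submission:
 * each emit_* helper writes (register header, value) pairs, so e.g. the
 * fence+trap sequence is 7 registers = 14 dwords and the VM flush is 10
 * registers = 20 dwords.  The scheduler uses these totals to size its
 * ring reservations.
 */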
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}
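
/*
 * Polaris and newer run UVD behind the GPU VM, so user command streams can
 * be executed directly and no .parse_cs hook is needed; older parts use
 * physical addressing, where the kernel CS parser
 * (amdgpu_uvd_ring_parse_cs) has to validate and patch every submission.
 */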

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

/* Linux v5.14.15 (same file, later version; listing ends mid-function) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
  56/**
  57* uvd_v6_0_enc_support - get encode support status
  58*
  59* @adev: amdgpu_device pointer
  60*
  61* Returns the current hardware encode support status
  62*/
  63static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
  64{
  65	return ((adev->asic_type >= CHIP_POLARIS10) &&
  66			(adev->asic_type <= CHIP_VEGAM) &&
  67			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
  68}
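
/*
 * The firmware version packs major.minor.rev into the top three bytes, so
 * FW_1_130_16 above is simply version 1.130.16; encode rings are only
 * enabled on Polaris-family parts whose firmware is at least that new (a
 * zero fw_version means the version is not known yet and is let through).
 */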

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
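
/*
 * Unlike the decode-ring test, which round-trips a value through
 * UVD_CONTEXT_ID, the encode test above simply submits an END command and
 * watches the hardware read pointer advance past it.
 */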

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
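
/*
 * ENC messages are framed as (size-in-bytes, command, payload...) groups:
 * 0x18 bytes of session info carrying the handle and the message BO
 * address, 0x14 bytes of task info, then an 8-byte op (0x08000001 above
 * creates a session, 0x08000002 below destroys one); the rest of the
 * 16-dword IB is zero padding.
 */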

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}
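
/*
 * CC_HARVEST_FUSES is an SMC-space fuse register: discrete boards can ship
 * with the UVD block fused off, and early_init above returns -ENOENT in
 * that case so the IP block is skipped entirely.
 */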

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
 695
 696/**
 697 * uvd_v6_0_start - start UVD block
 698 *
 699 * @adev: amdgpu_device pointer
 700 *
 701 * Set up and start the UVD block
 702 */
 703static int uvd_v6_0_start(struct amdgpu_device *adev)
 704{
 705	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 706	uint32_t rb_bufsz, tmp;
 707	uint32_t lmi_swap_cntl;
 708	uint32_t mp_swap_cntl;
 709	int i, j, r;
 710
 711	/* disable DPG */
 712	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 713
 714	/* disable byte swapping */
 715	lmi_swap_cntl = 0;
 716	mp_swap_cntl = 0;
 717
 718	uvd_v6_0_mc_resume(adev);
 719
 720	/* disable interrupt */
 721	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
 722
 723	/* stall UMC and register bus before resetting VCPU */
 724	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
 725	mdelay(1);
 726
 727	/* put LMI, VCPU, RBC etc... into reset */
 728	WREG32(mmUVD_SOFT_RESET,
 729		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 730		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 731		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 732		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 733		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 734		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 735		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 736		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
 737	mdelay(5);
 738
 739	/* take UVD block out of reset */
 740	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
 741	mdelay(5);
 742
 743	/* initialize UVD memory controller */
 744	WREG32(mmUVD_LMI_CTRL,
 745		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 746		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 747		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 748		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 749		UVD_LMI_CTRL__REQ_MODE_MASK |
 750		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
 751
 752#ifdef __BIG_ENDIAN
 753	/* swap (8 in 32) RB and IB */
 754	lmi_swap_cntl = 0xa;
 755	mp_swap_cntl = 0;
 756#endif
 757	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
 758	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 759
 760	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
 761	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
 762	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
 763	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
 764	WREG32(mmUVD_MPC_SET_ALU, 0);
 765	WREG32(mmUVD_MPC_SET_MUX, 0x88);
 766
 767	/* take all subblocks out of reset, except VCPU */
 768	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 769	mdelay(5);
 770
 771	/* enable VCPU clock */
 772	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 773
 774	/* enable UMC */
 775	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
 776
 777	/* boot up the VCPU */
 778	WREG32(mmUVD_SOFT_RESET, 0);
 779	mdelay(10);
 780
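	/*
	 * Wait for the VCPU to report ready: poll UVD_STATUS for bit 1
	 * (mask 0x2) for up to ~1s (100 x 10ms).  If it never shows up,
	 * pulse VCPU_SOFT_RESET and try again, up to 10 times, before
	 * giving up.  (A sketch of the intent; the exact status bit
	 * semantics come from the UVD firmware interface.)
	 */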
 781	for (i = 0; i < 10; ++i) {
 782		uint32_t status;
 783
 784		for (j = 0; j < 100; ++j) {
 785			status = RREG32(mmUVD_STATUS);
 786			if (status & 2)
 787				break;
 788			mdelay(10);
 789		}
 790		r = 0;
 791		if (status & 2)
 792			break;
 793
 794		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
 795		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
 796		mdelay(10);
 797		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
 798		mdelay(10);
 799		r = -1;
 800	}
 801
 802	if (r) {
 803		DRM_ERROR("UVD not responding, giving up!!!\n");
 804		return r;
 805	}
 806	/* enable master interrupt */
 807	WREG32_P(mmUVD_MASTINT_EN,
 808		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 809		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 810
 811	/* clear bit 4 of UVD_STATUS */
 812	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 813
 814	/* force RBC into idle state */
 815	rb_bufsz = order_base_2(ring->ring_size);
 816	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
 817	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
 818	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 819	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
 820	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 821	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
 822	WREG32(mmUVD_RBC_RB_CNTL, tmp);
 823
 824	/* set the write pointer delay */
 825	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
 826
 827	/* set the wb address */
 828	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
 829
 830	/* program the RB_BASE for ring buffer */
 831	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 832			lower_32_bits(ring->gpu_addr));
 833	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
 834			upper_32_bits(ring->gpu_addr));
 835
 836	/* Initialize the ring buffer's read and write pointers */
 837	WREG32(mmUVD_RBC_RB_RPTR, 0);
 838
 839	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
 840	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 841
 842	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 843
 844	if (uvd_v6_0_enc_support(adev)) {
 845		ring = &adev->uvd.inst->ring_enc[0];
 846		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 847		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 848		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
 849		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 850		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
 851
 852		ring = &adev->uvd.inst->ring_enc[1];
 853		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 854		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 855		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
 856		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 857		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
 858	}
 859
 860	return 0;
 861}
 862
 863/**
 864 * uvd_v6_0_stop - stop UVD block
 865 *
 866 * @adev: amdgpu_device pointer
 867 *
 868 * stop the UVD block
 869 */
 870static void uvd_v6_0_stop(struct amdgpu_device *adev)
 871{
 872	/* force RBC into idle state */
 873	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 874
 875	/* Stall UMC and register bus before resetting VCPU */
 876	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
 877	mdelay(1);
 878
 879	/* put VCPU into reset */
 880	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 881	mdelay(5);
 882
 883	/* disable VCPU clock */
 884	WREG32(mmUVD_VCPU_CNTL, 0x0);
 885
 886	/* Unstall UMC and register bus */
 887	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
 888
 889	WREG32(mmUVD_STATUS, 0);
 890}
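
/*
 * The teardown above mirrors uvd_v6_0_start() in reverse: idle the ring
 * controller, stall the memory interface, hold the VCPU in reset, cut
 * its clock, release the stall and clear UVD_STATUS (which hw_fini and
 * check_soft_reset use as the "block is running" indicator).
 */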
 891
 892/**
 893 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 894 *
 895 * @ring: amdgpu_ring pointer
 896 * @addr: address
 897 * @seq: sequence number
 898 * @flags: fence related flags
 899 *
 900 * Write a fence and a trap command to the ring.
 901 */
 902static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 903				     unsigned flags)
 904{
 905	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 906
 907	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 908	amdgpu_ring_write(ring, seq);
 909	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 910	amdgpu_ring_write(ring, addr & 0xffffffff);
 911	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
 912	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
 913	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 914	amdgpu_ring_write(ring, 0);
 915
 916	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 917	amdgpu_ring_write(ring, 0);
 918	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
 919	amdgpu_ring_write(ring, 0);
 920	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 921	amdgpu_ring_write(ring, 2);
 922}
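
/*
 * The fence above goes out as six PACKET0 register writes; from the
 * pattern here, VCPU_CMD 0 appears to trigger the fence write of "seq"
 * to "addr", and VCPU_CMD 2 the trap (interrupt):
 *
 *   UVD_CONTEXT_ID       <- seq
 *   UVD_GPCOM_VCPU_DATA0 <- addr & 0xffffffff
 *   UVD_GPCOM_VCPU_DATA1 <- upper_32_bits(addr) & 0xff
 *   UVD_GPCOM_VCPU_CMD   <- 0   (fence)
 *   DATA0/DATA1          <- 0
 *   UVD_GPCOM_VCPU_CMD   <- 2   (trap)
 */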
 923
 924/**
 925 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 926 *
 927 * @ring: amdgpu_ring pointer
 928 * @addr: address
 929 * @seq: sequence number
 930 * @flags: fence related flags
 931 *
 932 * Write an enc fence and a trap command to the ring.
 933 */
 934static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 935			u64 seq, unsigned flags)
 936{
 937	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 938
 939	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
 940	amdgpu_ring_write(ring, addr);
 941	amdgpu_ring_write(ring, upper_32_bits(addr));
 942	amdgpu_ring_write(ring, seq);
 943	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
 944}
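
/*
 * Unlike the decode ring, the enc ring takes direct HEVC_ENC_CMD_*
 * opcodes rather than PACKET0 register writes: FENCE carries the
 * 64-bit address and sequence number, TRAP raises the interrupt.
 */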
 945
 946/**
 947 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 948 *
 949 * @ring: amdgpu_ring pointer
 950 */
 951static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 952{
 953	/* The firmware doesn't seem to like touching registers at this point. */
 954}
 955
 956/**
 957 * uvd_v6_0_ring_test_ring - register write test
 958 *
 959 * @ring: amdgpu_ring pointer
 960 *
 961 * Test if we can successfully write to the context register
 962 */
 963static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 964{
 965	struct amdgpu_device *adev = ring->adev;
 966	uint32_t tmp = 0;
 967	unsigned i;
 968	int r;
 969
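	/*
	 * Scratch-register handshake: seed UVD_CONTEXT_ID with a
	 * sentinel, then have the ring overwrite it.  Reading back
	 * 0xDEADBEEF proves the engine actually fetched and executed
	 * the ring contents.
	 */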
 970	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 971	r = amdgpu_ring_alloc(ring, 3);
 972	if (r)
 973		return r;
 974
 975	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 976	amdgpu_ring_write(ring, 0xDEADBEEF);
 977	amdgpu_ring_commit(ring);
 978	for (i = 0; i < adev->usec_timeout; i++) {
 979		tmp = RREG32(mmUVD_CONTEXT_ID);
 980		if (tmp == 0xDEADBEEF)
 981			break;
 982		udelay(1);
 983	}
 984
 985	if (i >= adev->usec_timeout)
 986		r = -ETIMEDOUT;
 987
 988	return r;
 989}
 990
 991/**
 992 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 993 *
 994 * @ring: amdgpu_ring pointer
 995 * @job: job to retrieve vmid from
 996 * @ib: indirect buffer to execute
 997 * @flags: unused
 998 *
 999 * Write ring commands to execute the indirect buffer
1000 */
1001static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
1002				  struct amdgpu_job *job,
1003				  struct amdgpu_ib *ib,
1004				  uint32_t flags)
1005{
1006	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1007
1008	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
1009	amdgpu_ring_write(ring, vmid);
1010
1011	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
1012	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1013	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
1014	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1015	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
1016	amdgpu_ring_write(ring, ib->length_dw);
1017}
1018
1019/**
1020 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
1021 *
1022 * @ring: amdgpu_ring pointer
1023 * @job: job to retrieve vmid from
1024 * @ib: indirect buffer to execute
1025 * @flags: unused
1026 *
1027 * Write enc ring commands to execute the indirect buffer
1028 */
1029static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1030					struct amdgpu_job *job,
1031					struct amdgpu_ib *ib,
1032					uint32_t flags)
1033{
1034	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1035
1036	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1037	amdgpu_ring_write(ring, vmid);
1038	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1039	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1040	amdgpu_ring_write(ring, ib->length_dw);
1041}
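
/*
 * As with the enc fence, IB execution on the enc ring is a single
 * five-dword HEVC_ENC_CMD_IB_VM command: vmid, 64-bit IB address and
 * length in dwords (which is where the emit_ib_size of 5 below comes
 * from).
 */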
1042
1043static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
1044				    uint32_t reg, uint32_t val)
1045{
1046	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1047	amdgpu_ring_write(ring, reg << 2);
1048	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1049	amdgpu_ring_write(ring, val);
1050	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1051	amdgpu_ring_write(ring, 0x8);
1052}
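
/*
 * From the pattern above, VCPU_CMD 0x8 appears to be "write register":
 * DATA0 takes the register byte offset (dword offset << 2), DATA1 the
 * value to write.
 */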
1053
1054static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1055					unsigned vmid, uint64_t pd_addr)
1056{
1057	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1058
1059	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1060	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
1061	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1062	amdgpu_ring_write(ring, 0);
1063	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
1064	amdgpu_ring_write(ring, 1 << vmid); /* mask */
1065	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1066	amdgpu_ring_write(ring, 0xC);
1067}
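
/*
 * After amdgpu_gmc_emit_flush_gpu_tlb() kicks the TLB flush (via
 * emit_wreg above), the sequence here appears to make the ring wait
 * until the VM_INVALIDATE_REQUEST bit for this vmid reads back as 0:
 * DATA0 = register, DATA1 = expected value, GP_SCRATCH8 = compare
 * mask, VCPU_CMD 0xC = poll.
 */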
1068
1069static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1070{
1071	uint32_t seq = ring->fence_drv.sync_seq;
1072	uint64_t addr = ring->fence_drv.gpu_addr;
1073
1074	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
1075	amdgpu_ring_write(ring, lower_32_bits(addr));
1076	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
1077	amdgpu_ring_write(ring, upper_32_bits(addr));
1078	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
1079	amdgpu_ring_write(ring, 0xffffffff); /* mask */
1080	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
1081	amdgpu_ring_write(ring, seq);
1082	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
1083	amdgpu_ring_write(ring, 0xE);
1084}
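
/*
 * Same poll mechanism, but against memory instead of a register:
 * DATA0/DATA1 hold the fence GPU address, GP_SCRATCH8 the mask,
 * GP_SCRATCH9 the sequence number, and VCPU_CMD 0xE appears to stall
 * the ring until the fence value catches up with sync_seq.
 */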
1085
1086static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1087{
1088	int i;
1089
1090	WARN_ON(ring->wptr % 2 || count % 2);
1091
1092	for (i = 0; i < count / 2; i++) {
1093		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
1094		amdgpu_ring_write(ring, 0);
1095	}
1096}
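
/*
 * Each NOP is a two-dword PACKET0 write to UVD_NO_OP, so padding is
 * only possible in multiples of two dwords; the WARN_ON above catches
 * callers that ask for an odd count or start at an odd wptr.
 */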
1097
1098static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1099{
1100	uint32_t seq = ring->fence_drv.sync_seq;
1101	uint64_t addr = ring->fence_drv.gpu_addr;
1102
1103	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
1104	amdgpu_ring_write(ring, lower_32_bits(addr));
1105	amdgpu_ring_write(ring, upper_32_bits(addr));
1106	amdgpu_ring_write(ring, seq);
1107}
1108
1109static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1110{
1111	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1112}
1113
1114static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1115					    unsigned int vmid, uint64_t pd_addr)
1116{
1117	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
1118	amdgpu_ring_write(ring, vmid);
1119	amdgpu_ring_write(ring, pd_addr >> 12);
1120
1121	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
1122	amdgpu_ring_write(ring, vmid);
1123}
1124
1125static bool uvd_v6_0_is_idle(void *handle)
1126{
1127	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1128
1129	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1130}
1131
1132static int uvd_v6_0_wait_for_idle(void *handle)
1133{
1134	unsigned i;
1135	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1136
1137	for (i = 0; i < adev->usec_timeout; i++) {
1138		if (uvd_v6_0_is_idle(handle))
1139			return 0;
1140	}
1141	return -ETIMEDOUT;
1142}
1143
1144#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1145static bool uvd_v6_0_check_soft_reset(void *handle)
1146{
1147	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1148	u32 srbm_soft_reset = 0;
1149	u32 tmp = RREG32(mmSRBM_STATUS);
1150
1151	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1152	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1153	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
1154		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1155
1156	if (srbm_soft_reset) {
1157		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
1158		return true;
1159	} else {
1160		adev->uvd.inst->srbm_soft_reset = 0;
1161		return false;
1162	}
1163}
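
/*
 * AMDGPU_UVD_STATUS_BUSY_MASK is 0xfd, i.e. every low status bit
 * except bit 1, presumably the "VCPU up" bit that uvd_v6_0_start()
 * polls for, so a healthy, merely-running UVD does not count as busy
 * for soft-reset purposes.
 */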
1164
1165static int uvd_v6_0_pre_soft_reset(void *handle)
1166{
1167	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1168
1169	if (!adev->uvd.inst->srbm_soft_reset)
1170		return 0;
1171
1172	uvd_v6_0_stop(adev);
1173	return 0;
1174}
1175
1176static int uvd_v6_0_soft_reset(void *handle)
1177{
1178	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1179	u32 srbm_soft_reset;
1180
1181	if (!adev->uvd.inst->srbm_soft_reset)
1182		return 0;
1183	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
1184
1185	if (srbm_soft_reset) {
1186		u32 tmp;
1187
1188		tmp = RREG32(mmSRBM_SOFT_RESET);
1189		tmp |= srbm_soft_reset;
1190		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1191		WREG32(mmSRBM_SOFT_RESET, tmp);
1192		tmp = RREG32(mmSRBM_SOFT_RESET);
1193
1194		udelay(50);
1195
1196		tmp &= ~srbm_soft_reset;
1197		WREG32(mmSRBM_SOFT_RESET, tmp);
1198		tmp = RREG32(mmSRBM_SOFT_RESET);
1199
1200		/* Wait a little for things to settle down */
1201		udelay(50);
1202	}
1203
1204	return 0;
1205}
1206
1207static int uvd_v6_0_post_soft_reset(void *handle)
1208{
1209	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1210
1211	if (!adev->uvd.inst->srbm_soft_reset)
1212		return 0;
1213
1214	mdelay(5);
1215
1216	return uvd_v6_0_start(adev);
1217}
1218
1219static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
1220					struct amdgpu_irq_src *source,
1221					unsigned type,
1222					enum amdgpu_interrupt_state state)
1223{
1224	// TODO
1225	return 0;
1226}
1227
1228static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
1229				      struct amdgpu_irq_src *source,
1230				      struct amdgpu_iv_entry *entry)
1231{
1232	bool int_handled = true;
1233	DRM_DEBUG("IH: UVD TRAP\n");
1234
1235	switch (entry->src_id) {
1236	case 124:
1237		amdgpu_fence_process(&adev->uvd.inst->ring);
1238		break;
1239	case 119:
1240		if (likely(uvd_v6_0_enc_support(adev)))
1241			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
1242		else
1243			int_handled = false;
1244		break;
1245	case 120:
1246		if (likely(uvd_v6_0_enc_support(adev)))
1247			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
1248		else
1249			int_handled = false;
1250		break;
1251	}
1252
1253	if (!int_handled)
1254		DRM_ERROR("Unhandled interrupt: %d %d\n",
1255			  entry->src_id, entry->src_data[0]);
1256
1257	return 0;
1258}
1259
1260static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
1261{
1262	uint32_t data1, data3;
1263
1264	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1265	data3 = RREG32(mmUVD_CGC_GATE);
1266
1267	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
1268		     UVD_SUVD_CGC_GATE__SIT_MASK |
1269		     UVD_SUVD_CGC_GATE__SMP_MASK |
1270		     UVD_SUVD_CGC_GATE__SCM_MASK |
1271		     UVD_SUVD_CGC_GATE__SDB_MASK |
1272		     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
1273		     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
1274		     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
1275		     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
1276		     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
1277		     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
1278		     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
1279		     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
1280
1281	if (enable) {
1282		data3 |= (UVD_CGC_GATE__SYS_MASK       |
1283			UVD_CGC_GATE__UDEC_MASK      |
1284			UVD_CGC_GATE__MPEG2_MASK     |
1285			UVD_CGC_GATE__RBC_MASK       |
1286			UVD_CGC_GATE__LMI_MC_MASK    |
1287			UVD_CGC_GATE__LMI_UMC_MASK   |
1288			UVD_CGC_GATE__IDCT_MASK      |
1289			UVD_CGC_GATE__MPRD_MASK      |
1290			UVD_CGC_GATE__MPC_MASK       |
1291			UVD_CGC_GATE__LBSI_MASK      |
1292			UVD_CGC_GATE__LRBBM_MASK     |
1293			UVD_CGC_GATE__UDEC_RE_MASK   |
1294			UVD_CGC_GATE__UDEC_CM_MASK   |
1295			UVD_CGC_GATE__UDEC_IT_MASK   |
1296			UVD_CGC_GATE__UDEC_DB_MASK   |
1297			UVD_CGC_GATE__UDEC_MP_MASK   |
1298			UVD_CGC_GATE__WCB_MASK       |
1299			UVD_CGC_GATE__JPEG_MASK      |
1300			UVD_CGC_GATE__SCPU_MASK      |
1301			UVD_CGC_GATE__JPEG2_MASK);
1302		/* only when PG is enabled can we gate the clock to the VCPU */
1303		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1304			data3 |= UVD_CGC_GATE__VCPU_MASK;
1305
1306		data3 &= ~UVD_CGC_GATE__REGS_MASK;
1307	} else {
1308		data3 = 0;
1309	}
1310
1311	WREG32(mmUVD_SUVD_CGC_GATE, data1);
1312	WREG32(mmUVD_CGC_GATE, data3);
1313}
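
/*
 * Note the asymmetry above: the SUVD gate bits (data1) are always
 * switched on, while the main CGC gates (data3) are either enabled
 * wholesale (minus REGS, plus VCPU only when UVD powergating is
 * supported) or cleared to 0 when gating is disabled.
 */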
1314
1315static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
1316{
1317	uint32_t data, data2;
1318
1319	data = RREG32(mmUVD_CGC_CTRL);
1320	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
1321
1323	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1324		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1325
1327	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1328		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1329		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1330
1331	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1332			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1333			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1334			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1335			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1336			UVD_CGC_CTRL__SYS_MODE_MASK |
1337			UVD_CGC_CTRL__UDEC_MODE_MASK |
1338			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1339			UVD_CGC_CTRL__REGS_MODE_MASK |
1340			UVD_CGC_CTRL__RBC_MODE_MASK |
1341			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1342			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1343			UVD_CGC_CTRL__IDCT_MODE_MASK |
1344			UVD_CGC_CTRL__MPRD_MODE_MASK |
1345			UVD_CGC_CTRL__MPC_MODE_MASK |
1346			UVD_CGC_CTRL__LBSI_MODE_MASK |
1347			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1348			UVD_CGC_CTRL__WCB_MODE_MASK |
1349			UVD_CGC_CTRL__VCPU_MODE_MASK |
1350			UVD_CGC_CTRL__JPEG_MODE_MASK |
1351			UVD_CGC_CTRL__SCPU_MODE_MASK |
1352			UVD_CGC_CTRL__JPEG2_MODE_MASK);
1353	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1354			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1355			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1356			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1357			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1358
1359	WREG32(mmUVD_CGC_CTRL, data);
1360	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
1361}
1362
1363#if 0
1364static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
1365{
1366	uint32_t data, data1, cgc_flags, suvd_flags;
1367
1368	data = RREG32(mmUVD_CGC_GATE);
1369	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1370
1371	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1372		UVD_CGC_GATE__UDEC_MASK |
1373		UVD_CGC_GATE__MPEG2_MASK |
1374		UVD_CGC_GATE__RBC_MASK |
1375		UVD_CGC_GATE__LMI_MC_MASK |
1376		UVD_CGC_GATE__IDCT_MASK |
1377		UVD_CGC_GATE__MPRD_MASK |
1378		UVD_CGC_GATE__MPC_MASK |
1379		UVD_CGC_GATE__LBSI_MASK |
1380		UVD_CGC_GATE__LRBBM_MASK |
1381		UVD_CGC_GATE__UDEC_RE_MASK |
1382		UVD_CGC_GATE__UDEC_CM_MASK |
1383		UVD_CGC_GATE__UDEC_IT_MASK |
1384		UVD_CGC_GATE__UDEC_DB_MASK |
1385		UVD_CGC_GATE__UDEC_MP_MASK |
1386		UVD_CGC_GATE__WCB_MASK |
1387		UVD_CGC_GATE__VCPU_MASK |
1388		UVD_CGC_GATE__SCPU_MASK |
1389		UVD_CGC_GATE__JPEG_MASK |
1390		UVD_CGC_GATE__JPEG2_MASK;
1391
1392	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1393				UVD_SUVD_CGC_GATE__SIT_MASK |
1394				UVD_SUVD_CGC_GATE__SMP_MASK |
1395				UVD_SUVD_CGC_GATE__SCM_MASK |
1396				UVD_SUVD_CGC_GATE__SDB_MASK;
1397
1398	data |= cgc_flags;
1399	data1 |= suvd_flags;
1400
1401	WREG32(mmUVD_CGC_GATE, data);
1402	WREG32(mmUVD_SUVD_CGC_GATE, data1);
1403}
1404#endif
1405
1406static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
1407				 bool enable)
1408{
1409	u32 orig, data;
1410
1411	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
1412		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1413		data |= 0xfff;
1414		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1415
1416		orig = data = RREG32(mmUVD_CGC_CTRL);
1417		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1418		if (orig != data)
1419			WREG32(mmUVD_CGC_CTRL, data);
1420	} else {
1421		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1422		data &= ~0xfff;
1423		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1424
1425		orig = data = RREG32(mmUVD_CGC_CTRL);
1426		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1427		if (orig != data)
1428			WREG32(mmUVD_CGC_CTRL, data);
1429	}
1430}
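
/*
 * MGCG here is two knobs: the low 12 bits of ixUVD_CGC_MEM_CTRL
 * (reached through the indirect UVD context register space via
 * RREG32_UVD_CTX/WREG32_UVD_CTX) and the DYN_CLOCK_MODE bit in
 * UVD_CGC_CTRL; both are set to enable and cleared to disable.
 */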
1431
1432static int uvd_v6_0_set_clockgating_state(void *handle,
1433					  enum amd_clockgating_state state)
1434{
1435	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1436	bool enable = (state == AMD_CG_STATE_GATE);
1437
1438	if (enable) {
1439		/* wait for STATUS to clear */
1440		if (uvd_v6_0_wait_for_idle(handle))
1441			return -EBUSY;
1442		uvd_v6_0_enable_clock_gating(adev, true);
1443		/* enable HW gates because UVD is idle */
1444/*		uvd_v6_0_set_hw_clock_gating(adev); */
1445	} else {
1446		/* disable HW gating and enable SW gating */
1447		uvd_v6_0_enable_clock_gating(adev, false);
1448	}
1449	uvd_v6_0_set_sw_clock_gating(adev);
1450	return 0;
1451}
1452
1453static int uvd_v6_0_set_powergating_state(void *handle,
1454					  enum amd_powergating_state state)
1455{
1456	/* This doesn't actually powergate the UVD block.
1457	 * That's done in the dpm code via the SMC.  This
1458	 * just re-inits the block as necessary.  The actual
1459	 * gating still happens in the dpm code.  We should
1460	 * revisit this when there is a cleaner line between
1461	 * the smc and the hw blocks
1462	 */
1463	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1464	int ret = 0;
1465
1466	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1467
1468	if (state == AMD_PG_STATE_GATE) {
1469		uvd_v6_0_stop(adev);
1470	} else {
1471		ret = uvd_v6_0_start(adev);
1472		if (ret)
1473			goto out;
1474	}
1475
1476out:
1477	return ret;
1478}
1479
1480static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
1481{
1482	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1483	int data;
1484
1485	mutex_lock(&adev->pm.mutex);
1486
1487	if (adev->flags & AMD_IS_APU)
1488		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
1489	else
1490		data = RREG32_SMC(ixCURRENT_PG_STATUS);
1491
1492	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
1493		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
1494		goto out;
1495	}
1496
1497	/* AMD_CG_SUPPORT_UVD_MGCG */
1498	data = RREG32(mmUVD_CGC_CTRL);
1499	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
1500		*flags |= AMD_CG_SUPPORT_UVD_MGCG;
1501
1502out:
1503	mutex_unlock(&adev->pm.mutex);
1504}
1505
1506static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
1507	.name = "uvd_v6_0",
1508	.early_init = uvd_v6_0_early_init,
1509	.late_init = NULL,
1510	.sw_init = uvd_v6_0_sw_init,
1511	.sw_fini = uvd_v6_0_sw_fini,
1512	.hw_init = uvd_v6_0_hw_init,
1513	.hw_fini = uvd_v6_0_hw_fini,
1514	.suspend = uvd_v6_0_suspend,
1515	.resume = uvd_v6_0_resume,
1516	.is_idle = uvd_v6_0_is_idle,
1517	.wait_for_idle = uvd_v6_0_wait_for_idle,
1518	.check_soft_reset = uvd_v6_0_check_soft_reset,
1519	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
1520	.soft_reset = uvd_v6_0_soft_reset,
1521	.post_soft_reset = uvd_v6_0_post_soft_reset,
1522	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
1523	.set_powergating_state = uvd_v6_0_set_powergating_state,
1524	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
1525};
1526
1527static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1528	.type = AMDGPU_RING_TYPE_UVD,
1529	.align_mask = 0xf,
1530	.support_64bit_ptrs = false,
1531	.no_user_fence = true,
1532	.get_rptr = uvd_v6_0_ring_get_rptr,
1533	.get_wptr = uvd_v6_0_ring_get_wptr,
1534	.set_wptr = uvd_v6_0_ring_set_wptr,
1535	.parse_cs = amdgpu_uvd_ring_parse_cs,
1536	.emit_frame_size =
1537		6 + /* hdp invalidate */
1538		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1539		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
1540	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1541	.emit_ib = uvd_v6_0_ring_emit_ib,
1542	.emit_fence = uvd_v6_0_ring_emit_fence,
1543	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1544	.test_ring = uvd_v6_0_ring_test_ring,
1545	.test_ib = amdgpu_uvd_ring_test_ib,
1546	.insert_nop = uvd_v6_0_ring_insert_nop,
1547	.pad_ib = amdgpu_ring_generic_pad_ib,
1548	.begin_use = amdgpu_uvd_ring_begin_use,
1549	.end_use = amdgpu_uvd_ring_end_use,
1550	.emit_wreg = uvd_v6_0_ring_emit_wreg,
1551};
1552
1553static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1554	.type = AMDGPU_RING_TYPE_UVD,
1555	.align_mask = 0xf,
1556	.support_64bit_ptrs = false,
1557	.no_user_fence = true,
1558	.get_rptr = uvd_v6_0_ring_get_rptr,
1559	.get_wptr = uvd_v6_0_ring_get_wptr,
1560	.set_wptr = uvd_v6_0_ring_set_wptr,
1561	.emit_frame_size =
1562		6 + /* hdp invalidate */
1563		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
1564		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
1565		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
1566	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
1567	.emit_ib = uvd_v6_0_ring_emit_ib,
1568	.emit_fence = uvd_v6_0_ring_emit_fence,
1569	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
1570	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1571	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1572	.test_ring = uvd_v6_0_ring_test_ring,
1573	.test_ib = amdgpu_uvd_ring_test_ib,
1574	.insert_nop = uvd_v6_0_ring_insert_nop,
1575	.pad_ib = amdgpu_ring_generic_pad_ib,
1576	.begin_use = amdgpu_uvd_ring_begin_use,
1577	.end_use = amdgpu_uvd_ring_end_use,
1578	.emit_wreg = uvd_v6_0_ring_emit_wreg,
1579};
1580
1581static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
1582	.type = AMDGPU_RING_TYPE_UVD_ENC,
1583	.align_mask = 0x3f,
1584	.nop = HEVC_ENC_CMD_NO_OP,
1585	.support_64bit_ptrs = false,
1586	.no_user_fence = true,
1587	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
1588	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
1589	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
1590	.emit_frame_size =
1591		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
1592		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
1593		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1594		1, /* uvd_v6_0_enc_ring_insert_end */
1595	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
1596	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
1597	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
1598	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
1599	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
1600	.test_ring = uvd_v6_0_enc_ring_test_ring,
1601	.test_ib = uvd_v6_0_enc_ring_test_ib,
1602	.insert_nop = amdgpu_ring_insert_nop,
1603	.insert_end = uvd_v6_0_enc_ring_insert_end,
1604	.pad_ib = amdgpu_ring_generic_pad_ib,
1605	.begin_use = amdgpu_uvd_ring_begin_use,
1606	.end_use = amdgpu_uvd_ring_end_use,
1607};
1608
1609static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
1610{
1611	if (adev->asic_type >= CHIP_POLARIS10) {
1612		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
1613		DRM_INFO("UVD is enabled in VM mode\n");
1614	} else {
1615		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
1616		DRM_INFO("UVD is enabled in physical mode\n");
1617	}
1618}
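
/*
 * Polaris10 and newer run the decode ring in VM mode (with
 * emit_vm_flush/emit_pipeline_sync); older parts use the physical-mode
 * function table, which instead routes command streams through
 * amdgpu_uvd_ring_parse_cs.
 */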
1619
1620static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1621{
1622	int i;
1623
1624	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
1625		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
1626
1627	DRM_INFO("UVD ENC is enabled in VM mode\n");
1628}
1629
1630static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
1631	.set = uvd_v6_0_set_interrupt_state,
1632	.process = uvd_v6_0_process_interrupt,
1633};
1634
1635static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1636{
1637	if (uvd_v6_0_enc_support(adev))
1638		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
1639	else
1640		adev->uvd.inst->irq.num_types = 1;
1641
1642	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
1643}
1644
1645const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
1646{
1647		.type = AMD_IP_BLOCK_TYPE_UVD,
1648		.major = 6,
1649		.minor = 0,
1650		.rev = 0,
1651		.funcs = &uvd_v6_0_ip_funcs,
1652};
1653
1654const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
1655{
1656		.type = AMD_IP_BLOCK_TYPE_UVD,
1657		.major = 6,
1658		.minor = 2,
1659		.rev = 0,
1660		.funcs = &uvd_v6_0_ip_funcs,
1661};
1662
1663const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
1664{
1665		.type = AMD_IP_BLOCK_TYPE_UVD,
1666		.major = 6,
1667		.minor = 3,
1668		.rev = 0,
1669		.funcs = &uvd_v6_0_ip_funcs,
1670};