v4.17
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
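
/*
 * The RBC ring buffer is driven through a pair of MMIO pointers: the VCPU
 * firmware advances UVD_RBC_RB_RPTR as it consumes command dwords, while
 * the driver queues work by bumping its cached ring->wptr and publishing
 * it through UVD_RBC_RB_WPTR in uvd_v5_0_ring_set_wptr() above.
 */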

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
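
/*
 * PACKET0(reg, n), from vid.h, builds a type-0 packet header whose low bits
 * carry the register offset and whose count field requests n + 1 payload
 * dwords.  Each header/payload pair emitted above is therefore one posted
 * register write; PACKET0(mmUVD_SEMA_CNTL, 0) followed by 3, for example,
 * stores the value 3 to UVD_SEMA_CNTL through the ring.
 */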

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
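
/*
 * The three VCPU cache windows carve up the single UVD buffer object:
 * window 0 maps the firmware image, window 1 the decoder heap, and
 * window 2 the stack plus per-session state (AMDGPU_UVD_SESSION_SIZE *
 * max_handles).  Judging by the "offset >> 3" above, the OFFSETn registers
 * are programmed in 8-byte units relative to the 64-bit BAR base.
 */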

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
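
/*
 * Two register helpers do the heavy lifting above: REG_SET_FIELD(val, reg,
 * field, v) clears the named bitfield in val and ORs v in at the field's
 * shift, which is how UVD_RBC_RB_CNTL is composed one field at a time
 * before the single WREG32; WREG32_P(reg, val, mask) is the
 * read-modify-write variant where only the bits cleared in mask are
 * replaced, letting the boot sequence toggle individual reset and stall
 * bits without disturbing their neighbours.
 */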

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
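
/*
 * This is two GPCOM commands back to back: the first (CONTEXT_ID = seq,
 * DATA0/DATA1 = fence address, CMD = 0) has the VCPU write the sequence
 * number to the fence address, and the second (CMD = 2) raises the trap
 * interrupt that uvd_v5_0_process_interrupt() turns into
 * amdgpu_fence_process().  Note that only 40 address bits are carried:
 * DATA1 keeps just the low 8 bits of upper_32_bits(addr).
 */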

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK     |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK);
		/* the VCPU clock can only be gated when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
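
/*
 * Medium-grain clock gating (MGCG): the low 12 bits of the indexed
 * UVD_CGC_MEM_CTRL register appear to enable the per-memory-array clock
 * gates, and DYN_CLOCK_MODE switches UVD_CGC_CTRL to dynamic, load-driven
 * gating.  RREG32_UVD_CTX()/WREG32_UVD_CTX() access these "ix" registers
 * through the UVD context index/data pair instead of the flat MMIO space.
 */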

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
/*		uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
};
v5.4
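The v5.4 revision of the same file follows. Relative to v4.17 it drops the
drmP.h catch-all include in favour of explicit headers, addresses the single
UVD instance through adev->uvd.inst, requests the trap interrupt by its named
source id (VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE), initializes the ring
before resuming UVD and adds amdgpu_uvd_entity_init(), replaces the
open-coded ring test in hw_init with amdgpu_ring_test_helper(), and gains a
UVD-specific insert_nop implementation.
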
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}
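
/*
 * Note the ordering relative to v4.17: the ring is now initialized before
 * amdgpu_uvd_resume() restores the firmware state, and
 * amdgpu_uvd_entity_init() sets up the drm_sched entity that UVD
 * submissions are funnelled through.
 */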

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
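
/*
 * amdgpu_ring_test_helper() runs the ring test and records the result in
 * ring->sched.ready, so unlike the v4.17 version hw_init no longer has to
 * toggle a ready flag by hand.
 */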

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to
 * @ib: indirect buffer to execute
 * @flags: emit flags
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
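
/*
 * NOPs on this ring have to be whole register writes, i.e. a
 * PACKET0(mmUVD_NO_OP, 0) header plus one payload dword, which is why both
 * the current wptr and the requested count must be even (hence the
 * WARN_ON).  Together with .align_mask = 0xf below, this keeps submissions
 * padded to 16-dword boundaries.
 */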

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK     |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK);
		/* the VCPU clock can only be gated when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
/*		uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
};