/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
				enum amd_clockgating_state state);
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);
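
/*
 * Note on the register helpers used throughout this file: RREG32()/WREG32()
 * read/write an MMIO register directly, and WREG32_P(reg, val, mask) is the
 * usual amdgpu read-modify-write helper: bits set in @mask are preserved,
 * the remaining bits are taken from @val.
 */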

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP (interrupt source id 124) */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}
/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	r = uvd_v4_2_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

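	/*
	 * Program the semaphore timeout registers through the ring:
	 * PACKET0(reg, 0) is a type-0 packet telling the engine to write
	 * the following dword into @reg, so each pair below programs one
	 * register/value pair.
	 */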
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v4_2_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v4_2_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	WREG32(mmUVD_CGC_GATE, 0);
	uvd_v4_2_set_dcm(adev, true);

	uvd_v4_2_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

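	/*
	 * The VCPU is considered booted once UVD_STATUS reports ready
	 * (bit 1, tested below). Poll for up to one second (100 * 10 ms);
	 * if it never comes up, pulse the VCPU soft reset and retry, for
	 * at most 10 attempts in total.
	 */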
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags (64-bit sequence numbers are not supported)
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

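	/* first command group: latch the sequence number in UVD_CONTEXT_ID,
	 * then issue the fence command (VCPU_CMD = 0) targeting @addr */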
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

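	/* second command group: the trap command (VCPU_CMD = 2), which
	 * raises the UVD interrupt once the fence has been processed */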
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

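	/* seed UVD_CONTEXT_ID with a token, have the ring overwrite it,
	 * and poll until 0xDEADBEEF appears, which proves the engine
	 * actually executed the ring's register write */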
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory id (unused by this ring)
 * @ctx_switch: context switch flag (unused by this ring)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

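	/* The VCPU cache windows are programmed in 8-byte units (hence
	 * the >> 3): the firmware image first, then the heap, then the
	 * stack plus the per-session context areas, laid out back to
	 * back. */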
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
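
/*
 * Medium-grain clock gating (MGCG): when enabled and supported
 * (AMD_CG_SUPPORT_UVD_MGCG), gate the UVD local memories and switch the
 * clock-gating controller to dynamic clock mode; otherwise leave the
 * clocks ungated.
 */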
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
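
/*
 * Configure dynamic clock mode (DCM): always enable the dynamic clock
 * with the gate-delay/off-delay timings below; in sw_mode, additionally
 * clear the upper CGC_CTRL override bits and program the CGC_CTRL2
 * clock-ramp and gater-divider fields.
 */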
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		return 0;
	} else {
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};