v5.4
  1/*
  2 * Copyright 2013 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Christian König <christian.koenig@amd.com>
 23 */
 24
 25#include <linux/firmware.h>
 26
 27#include "amdgpu.h"
 28#include "amdgpu_uvd.h"
 29#include "cikd.h"
 30
 31#include "uvd/uvd_4_2_d.h"
 32#include "uvd/uvd_4_2_sh_mask.h"
 33
 34#include "oss/oss_2_0_d.h"
 35#include "oss/oss_2_0_sh_mask.h"
 36
 37#include "bif/bif_4_1_d.h"
 38
 39#include "smu/smu_7_0_1_d.h"
 40#include "smu/smu_7_0_1_sh_mask.h"
 41
 42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 43static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 44static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 45static int uvd_v4_2_start(struct amdgpu_device *adev);
 46static void uvd_v4_2_stop(struct amdgpu_device *adev);
 47static int uvd_v4_2_set_clockgating_state(void *handle,
 48				enum amd_clockgating_state state);
 49static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
 50			     bool sw_mode);
 51/**
 52 * uvd_v4_2_ring_get_rptr - get read pointer
 53 *
 54 * @ring: amdgpu_ring pointer
 55 *
 56 * Returns the current hardware read pointer
 57 */
 58static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
 59{
 60	struct amdgpu_device *adev = ring->adev;
 61
 62	return RREG32(mmUVD_RBC_RB_RPTR);
 63}
 64
 65/**
 66 * uvd_v4_2_ring_get_wptr - get write pointer
 67 *
 68 * @ring: amdgpu_ring pointer
 69 *
 70 * Returns the current hardware write pointer
 71 */
 72static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
 73{
 74	struct amdgpu_device *adev = ring->adev;
 75
 76	return RREG32(mmUVD_RBC_RB_WPTR);
 77}
 78
 79/**
 80 * uvd_v4_2_ring_set_wptr - set write pointer
 81 *
 82 * @ring: amdgpu_ring pointer
 83 *
 84 * Commits the write pointer to the hardware
 85 */
 86static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
 87{
 88	struct amdgpu_device *adev = ring->adev;
 89
 90	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 91}
 92
 93static int uvd_v4_2_early_init(void *handle)
 94{
 95	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 96	adev->uvd.num_uvd_inst = 1;
 97
 98	uvd_v4_2_set_ring_funcs(adev);
 99	uvd_v4_2_set_irq_funcs(adev);
100
101	return 0;
102}
103
104static int uvd_v4_2_sw_init(void *handle)
105{
106	struct amdgpu_ring *ring;
107	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
108	int r;
109
110	/* UVD TRAP */
111	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
112	if (r)
113		return r;
114
115	r = amdgpu_uvd_sw_init(adev);
116	if (r)
117		return r;
118
119	ring = &adev->uvd.inst->ring;
120	sprintf(ring->name, "uvd");
 121	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 122	if (r)
123		return r;
124
125	r = amdgpu_uvd_resume(adev);
126	if (r)
127		return r;
128
129	r = amdgpu_uvd_entity_init(adev);
130
131	return r;
132}
133
134static int uvd_v4_2_sw_fini(void *handle)
135{
136	int r;
137	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
138
139	r = amdgpu_uvd_suspend(adev);
140	if (r)
141		return r;
142
143	return amdgpu_uvd_sw_fini(adev);
144}
145
146static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
147				 bool enable);
148/**
149 * uvd_v4_2_hw_init - start and test UVD block
150 *
 151 * @handle: handle used to pass amdgpu_device pointer
152 *
153 * Initialize the hardware, boot up the VCPU and do some testing
154 */
155static int uvd_v4_2_hw_init(void *handle)
156{
157	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
158	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
159	uint32_t tmp;
160	int r;
161
162	uvd_v4_2_enable_mgcg(adev, true);
163	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
164
165	r = amdgpu_ring_test_helper(ring);
166	if (r)
167		goto done;
168
169	r = amdgpu_ring_alloc(ring, 10);
170	if (r) {
 171		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
172		goto done;
173	}
174
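	/* program generous timeouts for semaphore wait-fault, wait-incomplete and signal-incomplete conditions */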
175	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
176	amdgpu_ring_write(ring, tmp);
177	amdgpu_ring_write(ring, 0xFFFFF);
178
179	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
180	amdgpu_ring_write(ring, tmp);
181	amdgpu_ring_write(ring, 0xFFFFF);
182
183	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
184	amdgpu_ring_write(ring, tmp);
185	amdgpu_ring_write(ring, 0xFFFFF);
186
187	/* Clear timeout status bits */
188	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
189	amdgpu_ring_write(ring, 0x8);
190
191	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
192	amdgpu_ring_write(ring, 3);
193
194	amdgpu_ring_commit(ring);
195
196done:
197	if (!r)
198		DRM_INFO("UVD initialized successfully.\n");
199
200	return r;
201}
202
203/**
204 * uvd_v4_2_hw_fini - stop the hardware block
205 *
 206 * @handle: handle used to pass amdgpu_device pointer
207 *
 208 * Stop the UVD block, mark the ring as not ready any more
209 */
210static int uvd_v4_2_hw_fini(void *handle)
211{
212	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 213	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 214
215	if (RREG32(mmUVD_STATUS) != 0)
216		uvd_v4_2_stop(adev);
217
218	ring->sched.ready = false;
219
220	return 0;
221}
222
223static int uvd_v4_2_suspend(void *handle)
224{
225	int r;
226	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 227
228	r = uvd_v4_2_hw_fini(adev);
229	if (r)
230		return r;
231
232	return amdgpu_uvd_suspend(adev);
233}
234
235static int uvd_v4_2_resume(void *handle)
236{
237	int r;
238	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
239
240	r = amdgpu_uvd_resume(adev);
241	if (r)
242		return r;
243
244	return uvd_v4_2_hw_init(adev);
245}
246
247/**
248 * uvd_v4_2_start - start UVD block
249 *
250 * @adev: amdgpu_device pointer
251 *
 252 * Set up and start the UVD block
253 */
254static int uvd_v4_2_start(struct amdgpu_device *adev)
255{
256	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
257	uint32_t rb_bufsz;
258	int i, j, r;
259	u32 tmp;
260	/* disable byte swapping */
261	u32 lmi_swap_cntl = 0;
262	u32 mp_swap_cntl = 0;
263
264	/* set uvd busy */
265	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
266
267	uvd_v4_2_set_dcm(adev, true);
268	WREG32(mmUVD_CGC_GATE, 0);
269
270	/* take UVD block out of reset */
271	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
272	mdelay(5);
273
274	/* enable VCPU clock */
275	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
276
 277	/* disable interrupts */
278	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
279
280#ifdef __BIG_ENDIAN
281	/* swap (8 in 32) RB and IB */
282	lmi_swap_cntl = 0xa;
283	mp_swap_cntl = 0;
284#endif
285	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
286	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
287	/* initialize UVD memory controller */
288	WREG32(mmUVD_LMI_CTRL, 0x203108);
289
290	tmp = RREG32(mmUVD_MPC_CNTL);
291	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
292
293	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
294	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
295	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
296	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
297	WREG32(mmUVD_MPC_SET_ALU, 0);
298	WREG32(mmUVD_MPC_SET_MUX, 0x88);
299
300	uvd_v4_2_mc_resume(adev);
301
302	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
303	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
304
305	/* enable UMC */
306	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
307
308	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
309
310	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
311
312	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
313
314	mdelay(10);
315
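	/* give the VCPU up to 10 chances to report ready, toggling its soft reset between attempts */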
316	for (i = 0; i < 10; ++i) {
317		uint32_t status;
318		for (j = 0; j < 100; ++j) {
319			status = RREG32(mmUVD_STATUS);
320			if (status & 2)
321				break;
322			mdelay(10);
323		}
324		r = 0;
325		if (status & 2)
326			break;
327
328		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
329		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
330				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
331		mdelay(10);
332		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
333		mdelay(10);
334		r = -1;
335	}
336
337	if (r) {
338		DRM_ERROR("UVD not responding, giving up!!!\n");
339		return r;
340	}
341
 342	/* enable interrupts */
343	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
344
345	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
346
347	/* force RBC into idle state */
348	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
349
350	/* Set the write pointer delay */
351	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
352
 353	/* program the 4GB memory segment for rptr and ring buffer */
354	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
355				   (0x7 << 16) | (0x1 << 31));
356
357	/* Initialize the ring buffer's read and write pointers */
358	WREG32(mmUVD_RBC_RB_RPTR, 0x0);
359
360	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
361	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
362
363	/* set the ring address */
364	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
365
366	/* Set ring buffer size */
367	rb_bufsz = order_base_2(ring->ring_size);
368	rb_bufsz = (0x1 << 8) | rb_bufsz;
369	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
370
371	return 0;
372}
373
374/**
375 * uvd_v4_2_stop - stop UVD block
376 *
377 * @adev: amdgpu_device pointer
378 *
 379 * Stop the UVD block
380 */
381static void uvd_v4_2_stop(struct amdgpu_device *adev)
382{
383	uint32_t i, j;
384	uint32_t status;
385
386	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
387
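	/* wait on UVD_STATUS (bit 1), then on UVD_LMI_STATUS, before stalling the UMC below */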
388	for (i = 0; i < 10; ++i) {
389		for (j = 0; j < 100; ++j) {
390			status = RREG32(mmUVD_STATUS);
391			if (status & 2)
392				break;
393			mdelay(1);
394		}
395		if (status & 2)
396			break;
397	}
398
399	for (i = 0; i < 10; ++i) {
400		for (j = 0; j < 100; ++j) {
401			status = RREG32(mmUVD_LMI_STATUS);
402			if (status & 0xf)
403				break;
404			mdelay(1);
405		}
406		if (status & 0xf)
407			break;
408	}
409
410	/* Stall UMC and register bus before resetting VCPU */
411	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
412
413	for (i = 0; i < 10; ++i) {
414		for (j = 0; j < 100; ++j) {
415			status = RREG32(mmUVD_LMI_STATUS);
416			if (status & 0x240)
417				break;
418			mdelay(1);
419		}
420		if (status & 0x240)
421			break;
422	}
423
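	/* 0x3D49 is a raw register offset with no mm* define in this file; assumed to clear the busy bit set in uvd_v4_2_start() */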
424	WREG32_P(0x3D49, 0, ~(1 << 2));
425
426	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
427
428	/* put LMI, VCPU, RBC etc... into reset */
429	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
430		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
431		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
432
433	WREG32(mmUVD_STATUS, 0);
434
435	uvd_v4_2_set_dcm(adev, false);
436}
437
438/**
 439 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
440 *
441 * @ring: amdgpu_ring pointer
 442 * @addr: address
     * @seq: sequence number
     * @flags: fence related flags
443 *
444 * Write a fence and a trap command to the ring.
445 */
446static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
447				     unsigned flags)
448{
449	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
450
451	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
452	amdgpu_ring_write(ring, seq);
453	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
454	amdgpu_ring_write(ring, addr & 0xffffffff);
455	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
456	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
457	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
458	amdgpu_ring_write(ring, 0);
459
460	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
461	amdgpu_ring_write(ring, 0);
462	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
463	amdgpu_ring_write(ring, 0);
464	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
465	amdgpu_ring_write(ring, 2);
466}
467
468/**
469 * uvd_v4_2_ring_test_ring - register write test
470 *
471 * @ring: amdgpu_ring pointer
472 *
473 * Test if we can successfully write to the context register
474 */
475static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
476{
477	struct amdgpu_device *adev = ring->adev;
478	uint32_t tmp = 0;
479	unsigned i;
480	int r;
481
482	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
483	r = amdgpu_ring_alloc(ring, 3);
484	if (r)
485		return r;
486
487	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
488	amdgpu_ring_write(ring, 0xDEADBEEF);
489	amdgpu_ring_commit(ring);
490	for (i = 0; i < adev->usec_timeout; i++) {
491		tmp = RREG32(mmUVD_CONTEXT_ID);
492		if (tmp == 0xDEADBEEF)
493			break;
494		udelay(1);
495	}
496
497	if (i >= adev->usec_timeout)
498		r = -ETIMEDOUT;
499
500	return r;
501}
502
503/**
504 * uvd_v4_2_ring_emit_ib - execute indirect buffer
505 *
 506 * @ring: amdgpu_ring pointer
     * @job: job associated with the indirect buffer
 507 * @ib: indirect buffer to execute
     * @flags: flags associated with the indirect buffer
508 *
509 * Write ring commands to execute the indirect buffer
510 */
511static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
512				  struct amdgpu_job *job,
513				  struct amdgpu_ib *ib,
514				  uint32_t flags)
515{
516	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
517	amdgpu_ring_write(ring, ib->gpu_addr);
518	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
519	amdgpu_ring_write(ring, ib->length_dw);
520}
521
522static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
523{
524	int i;
525
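	/* each NOP is emitted as a two-dword PACKET0 pair, hence the even wptr/count requirement */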
526	WARN_ON(ring->wptr % 2 || count % 2);
527
528	for (i = 0; i < count / 2; i++) {
529		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
530		amdgpu_ring_write(ring, 0);
531	}
532}
533
534/**
535 * uvd_v4_2_mc_resume - memory controller programming
536 *
537 * @adev: amdgpu_device pointer
538 *
 539 * Let the UVD memory controller know its offsets
540 */
541static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
542{
543	uint64_t addr;
544	uint32_t size;
545
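	/* cache offsets and sizes below are programmed in 8-byte units (hence the >> 3 shifts) */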
 546	/* program the VCPU memory controller bits 0-27 */
547	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
548	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
549	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
550	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
551
552	addr += size;
553	size = AMDGPU_UVD_HEAP_SIZE >> 3;
554	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
555	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
556
557	addr += size;
558	size = (AMDGPU_UVD_STACK_SIZE +
559	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
560	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
561	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
562
563	/* bits 28-31 */
564	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
565	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
566
567	/* bits 32-39 */
568	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
569	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
570
571	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
572	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
573	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
574}
575
576static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
577				 bool enable)
578{
579	u32 orig, data;
580
581	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
582		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
583		data |= 0xfff;
584		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
585
586		orig = data = RREG32(mmUVD_CGC_CTRL);
587		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
588		if (orig != data)
589			WREG32(mmUVD_CGC_CTRL, data);
590	} else {
591		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
592		data &= ~0xfff;
593		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
594
595		orig = data = RREG32(mmUVD_CGC_CTRL);
596		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
597		if (orig != data)
598			WREG32(mmUVD_CGC_CTRL, data);
599	}
600}
601
602static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
603			     bool sw_mode)
604{
605	u32 tmp, tmp2;
606
607	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
608
609	tmp = RREG32(mmUVD_CGC_CTRL);
610	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
611	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
612		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
613		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);
614
615	if (sw_mode) {
616		tmp &= ~0x7ffff800;
617		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
618			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
619			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
620	} else {
621		tmp |= 0x7ffff800;
622		tmp2 = 0;
623	}
624
625	WREG32(mmUVD_CGC_CTRL, tmp);
626	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
627}
628
629static bool uvd_v4_2_is_idle(void *handle)
630{
631	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
632
633	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
634}
635
636static int uvd_v4_2_wait_for_idle(void *handle)
637{
638	unsigned i;
639	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
640
641	for (i = 0; i < adev->usec_timeout; i++) {
642		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
643			return 0;
644	}
645	return -ETIMEDOUT;
646}
647
648static int uvd_v4_2_soft_reset(void *handle)
649{
650	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
651
652	uvd_v4_2_stop(adev);
653
654	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
655			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
656	mdelay(5);
657
658	return uvd_v4_2_start(adev);
659}
660
661static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
662					struct amdgpu_irq_src *source,
663					unsigned type,
664					enum amdgpu_interrupt_state state)
665{
666	// TODO
667	return 0;
668}
669
670static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
671				      struct amdgpu_irq_src *source,
672				      struct amdgpu_iv_entry *entry)
673{
674	DRM_DEBUG("IH: UVD TRAP\n");
675	amdgpu_fence_process(&adev->uvd.inst->ring);
676	return 0;
677}
678
679static int uvd_v4_2_set_clockgating_state(void *handle,
680					  enum amd_clockgating_state state)
681{
682	return 0;
683}
684
685static int uvd_v4_2_set_powergating_state(void *handle,
686					  enum amd_powergating_state state)
687{
688	/* This doesn't actually powergate the UVD block.
689	 * That's done in the dpm code via the SMC.  This
690	 * just re-inits the block as necessary.  The actual
691	 * gating still happens in the dpm code.  We should
692	 * revisit this when there is a cleaner line between
693	 * the smc and the hw blocks
694	 */
695	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
696
697	if (state == AMD_PG_STATE_GATE) {
698		uvd_v4_2_stop(adev);
699		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
700			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
701				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
702				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
703							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
704							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
705				mdelay(20);
706			}
707		}
708		return 0;
709	} else {
710		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
711			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
712				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
713				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
714						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
715						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
716				mdelay(30);
717			}
718		}
719		return uvd_v4_2_start(adev);
720	}
721}
722
723static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
724	.name = "uvd_v4_2",
725	.early_init = uvd_v4_2_early_init,
726	.late_init = NULL,
727	.sw_init = uvd_v4_2_sw_init,
728	.sw_fini = uvd_v4_2_sw_fini,
729	.hw_init = uvd_v4_2_hw_init,
730	.hw_fini = uvd_v4_2_hw_fini,
731	.suspend = uvd_v4_2_suspend,
732	.resume = uvd_v4_2_resume,
733	.is_idle = uvd_v4_2_is_idle,
734	.wait_for_idle = uvd_v4_2_wait_for_idle,
735	.soft_reset = uvd_v4_2_soft_reset,
736	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
737	.set_powergating_state = uvd_v4_2_set_powergating_state,
738};
739
740static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
741	.type = AMDGPU_RING_TYPE_UVD,
742	.align_mask = 0xf,
743	.support_64bit_ptrs = false,
744	.no_user_fence = true,
745	.get_rptr = uvd_v4_2_ring_get_rptr,
746	.get_wptr = uvd_v4_2_ring_get_wptr,
747	.set_wptr = uvd_v4_2_ring_set_wptr,
748	.parse_cs = amdgpu_uvd_ring_parse_cs,
749	.emit_frame_size =
750		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
751	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
752	.emit_ib = uvd_v4_2_ring_emit_ib,
753	.emit_fence = uvd_v4_2_ring_emit_fence,
754	.test_ring = uvd_v4_2_ring_test_ring,
755	.test_ib = amdgpu_uvd_ring_test_ib,
756	.insert_nop = uvd_v4_2_ring_insert_nop,
757	.pad_ib = amdgpu_ring_generic_pad_ib,
758	.begin_use = amdgpu_uvd_ring_begin_use,
759	.end_use = amdgpu_uvd_ring_end_use,
760};
761
762static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
763{
764	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
765}
766
767static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
768	.set = uvd_v4_2_set_interrupt_state,
769	.process = uvd_v4_2_process_interrupt,
770};
771
772static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
773{
774	adev->uvd.inst->irq.num_types = 1;
775	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
776}
777
778const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
779{
780		.type = AMD_IP_BLOCK_TYPE_UVD,
781		.major = 4,
782		.minor = 2,
783		.rev = 0,
784		.funcs = &uvd_v4_2_ip_funcs,
785};
v6.2
  1/*
  2 * Copyright 2013 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 * Authors: Christian König <christian.koenig@amd.com>
 23 */
 24
 25#include <linux/firmware.h>
 26
 27#include "amdgpu.h"
 28#include "amdgpu_uvd.h"
 29#include "cikd.h"
 30
 31#include "uvd/uvd_4_2_d.h"
 32#include "uvd/uvd_4_2_sh_mask.h"
 33
 34#include "oss/oss_2_0_d.h"
 35#include "oss/oss_2_0_sh_mask.h"
 36
 37#include "bif/bif_4_1_d.h"
 38
 39#include "smu/smu_7_0_1_d.h"
 40#include "smu/smu_7_0_1_sh_mask.h"
 41
 42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 43static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 44static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 45static int uvd_v4_2_start(struct amdgpu_device *adev);
 46static void uvd_v4_2_stop(struct amdgpu_device *adev);
 47static int uvd_v4_2_set_clockgating_state(void *handle,
 48				enum amd_clockgating_state state);
 49static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
 50			     bool sw_mode);
 51/**
 52 * uvd_v4_2_ring_get_rptr - get read pointer
 53 *
 54 * @ring: amdgpu_ring pointer
 55 *
 56 * Returns the current hardware read pointer
 57 */
 58static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
 59{
 60	struct amdgpu_device *adev = ring->adev;
 61
 62	return RREG32(mmUVD_RBC_RB_RPTR);
 63}
 64
 65/**
 66 * uvd_v4_2_ring_get_wptr - get write pointer
 67 *
 68 * @ring: amdgpu_ring pointer
 69 *
 70 * Returns the current hardware write pointer
 71 */
 72static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
 73{
 74	struct amdgpu_device *adev = ring->adev;
 75
 76	return RREG32(mmUVD_RBC_RB_WPTR);
 77}
 78
 79/**
 80 * uvd_v4_2_ring_set_wptr - set write pointer
 81 *
 82 * @ring: amdgpu_ring pointer
 83 *
 84 * Commits the write pointer to the hardware
 85 */
 86static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
 87{
 88	struct amdgpu_device *adev = ring->adev;
 89
 90	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 91}
 92
 93static int uvd_v4_2_early_init(void *handle)
 94{
 95	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 96	adev->uvd.num_uvd_inst = 1;
 97
 98	uvd_v4_2_set_ring_funcs(adev);
 99	uvd_v4_2_set_irq_funcs(adev);
100
101	return 0;
102}
103
104static int uvd_v4_2_sw_init(void *handle)
105{
106	struct amdgpu_ring *ring;
107	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
108	int r;
109
110	/* UVD TRAP */
111	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
112	if (r)
113		return r;
114
115	r = amdgpu_uvd_sw_init(adev);
116	if (r)
117		return r;
118
119	ring = &adev->uvd.inst->ring;
120	sprintf(ring->name, "uvd");
121	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
122			     AMDGPU_RING_PRIO_DEFAULT, NULL);
123	if (r)
124		return r;
125
126	r = amdgpu_uvd_resume(adev);
127	if (r)
128		return r;
129
130	r = amdgpu_uvd_entity_init(adev);
131
132	return r;
133}
134
135static int uvd_v4_2_sw_fini(void *handle)
136{
137	int r;
138	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
139
140	r = amdgpu_uvd_suspend(adev);
141	if (r)
142		return r;
143
144	return amdgpu_uvd_sw_fini(adev);
145}
146
147static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
148				 bool enable);
149/**
150 * uvd_v4_2_hw_init - start and test UVD block
151 *
152 * @handle: handle used to pass amdgpu_device pointer
153 *
154 * Initialize the hardware, boot up the VCPU and do some testing
155 */
156static int uvd_v4_2_hw_init(void *handle)
157{
158	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
159	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
160	uint32_t tmp;
161	int r;
162
163	uvd_v4_2_enable_mgcg(adev, true);
164	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
165
166	r = amdgpu_ring_test_helper(ring);
167	if (r)
168		goto done;
169
170	r = amdgpu_ring_alloc(ring, 10);
171	if (r) {
 172		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
173		goto done;
174	}
175
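	/* program generous timeouts for semaphore wait-fault, wait-incomplete and signal-incomplete conditions */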
176	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
177	amdgpu_ring_write(ring, tmp);
178	amdgpu_ring_write(ring, 0xFFFFF);
179
180	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
181	amdgpu_ring_write(ring, tmp);
182	amdgpu_ring_write(ring, 0xFFFFF);
183
184	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
185	amdgpu_ring_write(ring, tmp);
186	amdgpu_ring_write(ring, 0xFFFFF);
187
188	/* Clear timeout status bits */
189	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
190	amdgpu_ring_write(ring, 0x8);
191
192	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
193	amdgpu_ring_write(ring, 3);
194
195	amdgpu_ring_commit(ring);
196
197done:
198	if (!r)
199		DRM_INFO("UVD initialized successfully.\n");
200
201	return r;
202}
203
204/**
205 * uvd_v4_2_hw_fini - stop the hardware block
206 *
207 * @handle: handle used to pass amdgpu_device pointer
208 *
 209 * Stop the UVD block, mark the ring as not ready any more
210 */
211static int uvd_v4_2_hw_fini(void *handle)
212{
213	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
214
215	cancel_delayed_work_sync(&adev->uvd.idle_work);
216
217	if (RREG32(mmUVD_STATUS) != 0)
218		uvd_v4_2_stop(adev);
 219
220	return 0;
221}
222
223static int uvd_v4_2_suspend(void *handle)
224{
225	int r;
226	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
227
228	/*
229	 * Proper cleanups before halting the HW engine:
230	 *   - cancel the delayed idle work
231	 *   - enable powergating
232	 *   - enable clockgating
233	 *   - disable dpm
234	 *
235	 * TODO: to align with the VCN implementation, move the
236	 * jobs for clockgating/powergating/dpm setting to
237	 * ->set_powergating_state().
238	 */
239	cancel_delayed_work_sync(&adev->uvd.idle_work);
240
241	if (adev->pm.dpm_enabled) {
242		amdgpu_dpm_enable_uvd(adev, false);
243	} else {
244		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
245		/* shutdown the UVD block */
246		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
247						       AMD_PG_STATE_GATE);
248		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
249						       AMD_CG_STATE_GATE);
250	}
251
252	r = uvd_v4_2_hw_fini(adev);
253	if (r)
254		return r;
255
256	return amdgpu_uvd_suspend(adev);
257}
258
259static int uvd_v4_2_resume(void *handle)
260{
261	int r;
262	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
263
264	r = amdgpu_uvd_resume(adev);
265	if (r)
266		return r;
267
268	return uvd_v4_2_hw_init(adev);
269}
270
271/**
272 * uvd_v4_2_start - start UVD block
273 *
274 * @adev: amdgpu_device pointer
275 *
 276 * Set up and start the UVD block
277 */
278static int uvd_v4_2_start(struct amdgpu_device *adev)
279{
280	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
281	uint32_t rb_bufsz;
282	int i, j, r;
283	u32 tmp;
284	/* disable byte swapping */
285	u32 lmi_swap_cntl = 0;
286	u32 mp_swap_cntl = 0;
287
288	/* set uvd busy */
289	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
290
291	uvd_v4_2_set_dcm(adev, true);
292	WREG32(mmUVD_CGC_GATE, 0);
293
294	/* take UVD block out of reset */
295	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
296	mdelay(5);
297
298	/* enable VCPU clock */
299	WREG32(mmUVD_VCPU_CNTL,  1 << 9);
300
 301	/* disable interrupts */
302	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
303
304#ifdef __BIG_ENDIAN
305	/* swap (8 in 32) RB and IB */
306	lmi_swap_cntl = 0xa;
307	mp_swap_cntl = 0;
308#endif
309	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
310	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
311	/* initialize UVD memory controller */
312	WREG32(mmUVD_LMI_CTRL, 0x203108);
313
314	tmp = RREG32(mmUVD_MPC_CNTL);
315	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
316
317	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
318	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
319	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
320	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
321	WREG32(mmUVD_MPC_SET_ALU, 0);
322	WREG32(mmUVD_MPC_SET_MUX, 0x88);
323
324	uvd_v4_2_mc_resume(adev);
325
326	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
327	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
328
329	/* enable UMC */
330	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
331
332	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
333
334	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
335
336	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
337
338	mdelay(10);
339
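	/* give the VCPU up to 10 chances to report ready, toggling its soft reset between attempts */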
340	for (i = 0; i < 10; ++i) {
341		uint32_t status;
342		for (j = 0; j < 100; ++j) {
343			status = RREG32(mmUVD_STATUS);
344			if (status & 2)
345				break;
346			mdelay(10);
347		}
348		r = 0;
349		if (status & 2)
350			break;
351
352		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
353		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
354				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
355		mdelay(10);
356		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
357		mdelay(10);
358		r = -1;
359	}
360
361	if (r) {
362		DRM_ERROR("UVD not responding, giving up!!!\n");
363		return r;
364	}
365
 366	/* enable interrupts */
367	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
368
369	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
370
371	/* force RBC into idle state */
372	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
373
374	/* Set the write pointer delay */
375	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
376
377	/* program the 4GB memory segment for rptr and ring buffer */
378	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
379				   (0x7 << 16) | (0x1 << 31));
380
381	/* Initialize the ring buffer's read and write pointers */
382	WREG32(mmUVD_RBC_RB_RPTR, 0x0);
383
384	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
385	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
386
387	/* set the ring address */
388	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
389
390	/* Set ring buffer size */
391	rb_bufsz = order_base_2(ring->ring_size);
392	rb_bufsz = (0x1 << 8) | rb_bufsz;
393	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
394
395	return 0;
396}
397
398/**
399 * uvd_v4_2_stop - stop UVD block
400 *
401 * @adev: amdgpu_device pointer
402 *
 403 * Stop the UVD block
404 */
405static void uvd_v4_2_stop(struct amdgpu_device *adev)
406{
407	uint32_t i, j;
408	uint32_t status;
409
410	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
411
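	/* wait on UVD_STATUS (bit 1), then on UVD_LMI_STATUS, before stalling the UMC below */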
412	for (i = 0; i < 10; ++i) {
413		for (j = 0; j < 100; ++j) {
414			status = RREG32(mmUVD_STATUS);
415			if (status & 2)
416				break;
417			mdelay(1);
418		}
419		if (status & 2)
420			break;
421	}
422
423	for (i = 0; i < 10; ++i) {
424		for (j = 0; j < 100; ++j) {
425			status = RREG32(mmUVD_LMI_STATUS);
426			if (status & 0xf)
427				break;
428			mdelay(1);
429		}
430		if (status & 0xf)
431			break;
432	}
433
434	/* Stall UMC and register bus before resetting VCPU */
435	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
436
437	for (i = 0; i < 10; ++i) {
438		for (j = 0; j < 100; ++j) {
439			status = RREG32(mmUVD_LMI_STATUS);
440			if (status & 0x240)
441				break;
442			mdelay(1);
443		}
444		if (status & 0x240)
445			break;
446	}
447
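	/* 0x3D49 is a raw register offset with no mm* define in this file; assumed to clear the busy bit set in uvd_v4_2_start() */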
448	WREG32_P(0x3D49, 0, ~(1 << 2));
449
450	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
451
452	/* put LMI, VCPU, RBC etc... into reset */
453	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
454		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
455		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
456
457	WREG32(mmUVD_STATUS, 0);
458
459	uvd_v4_2_set_dcm(adev, false);
460}
461
462/**
 463 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
464 *
465 * @ring: amdgpu_ring pointer
466 * @addr: address
467 * @seq: sequence number
468 * @flags: fence related flags
469 *
470 * Write a fence and a trap command to the ring.
471 */
472static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
473				     unsigned flags)
474{
475	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
476
477	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
478	amdgpu_ring_write(ring, seq);
479	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
480	amdgpu_ring_write(ring, addr & 0xffffffff);
481	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
482	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
483	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
484	amdgpu_ring_write(ring, 0);
485
486	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
487	amdgpu_ring_write(ring, 0);
488	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
489	amdgpu_ring_write(ring, 0);
490	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
491	amdgpu_ring_write(ring, 2);
492}
493
494/**
495 * uvd_v4_2_ring_test_ring - register write test
496 *
497 * @ring: amdgpu_ring pointer
498 *
499 * Test if we can successfully write to the context register
500 */
501static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
502{
503	struct amdgpu_device *adev = ring->adev;
504	uint32_t tmp = 0;
505	unsigned i;
506	int r;
507
508	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
509	r = amdgpu_ring_alloc(ring, 3);
510	if (r)
511		return r;
512
513	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
514	amdgpu_ring_write(ring, 0xDEADBEEF);
515	amdgpu_ring_commit(ring);
516	for (i = 0; i < adev->usec_timeout; i++) {
517		tmp = RREG32(mmUVD_CONTEXT_ID);
518		if (tmp == 0xDEADBEEF)
519			break;
520		udelay(1);
521	}
522
523	if (i >= adev->usec_timeout)
524		r = -ETIMEDOUT;
525
526	return r;
527}
528
529/**
530 * uvd_v4_2_ring_emit_ib - execute indirect buffer
531 *
532 * @ring: amdgpu_ring pointer
 533 * @job: job associated with the indirect buffer
534 * @ib: indirect buffer to execute
535 * @flags: flags associated with the indirect buffer
536 *
537 * Write ring commands to execute the indirect buffer
538 */
539static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
540				  struct amdgpu_job *job,
541				  struct amdgpu_ib *ib,
542				  uint32_t flags)
543{
544	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
545	amdgpu_ring_write(ring, ib->gpu_addr);
546	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
547	amdgpu_ring_write(ring, ib->length_dw);
548}
549
550static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
551{
552	int i;
553
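	/* each NOP is emitted as a two-dword PACKET0 pair, hence the even wptr/count requirement */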
554	WARN_ON(ring->wptr % 2 || count % 2);
555
556	for (i = 0; i < count / 2; i++) {
557		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
558		amdgpu_ring_write(ring, 0);
559	}
560}
561
562/**
563 * uvd_v4_2_mc_resume - memory controller programming
564 *
565 * @adev: amdgpu_device pointer
566 *
 567 * Let the UVD memory controller know its offsets
568 */
569static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
570{
571	uint64_t addr;
572	uint32_t size;
573
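	/* cache offsets and sizes below are programmed in 8-byte units (hence the >> 3 shifts) */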
574	/* program the VCPU memory controller bits 0-27 */
575	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
576	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
577	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
578	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
579
580	addr += size;
581	size = AMDGPU_UVD_HEAP_SIZE >> 3;
582	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
583	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
584
585	addr += size;
586	size = (AMDGPU_UVD_STACK_SIZE +
587	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
588	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
589	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
590
591	/* bits 28-31 */
592	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
593	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
594
595	/* bits 32-39 */
596	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
597	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
598
599	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
600	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
601	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
602}
603
604static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
605				 bool enable)
606{
607	u32 orig, data;
608
609	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
610		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
611		data |= 0xfff;
612		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
613
614		orig = data = RREG32(mmUVD_CGC_CTRL);
615		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
616		if (orig != data)
617			WREG32(mmUVD_CGC_CTRL, data);
618	} else {
619		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
620		data &= ~0xfff;
621		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
622
623		orig = data = RREG32(mmUVD_CGC_CTRL);
624		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
625		if (orig != data)
626			WREG32(mmUVD_CGC_CTRL, data);
627	}
628}
629
630static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
631			     bool sw_mode)
632{
633	u32 tmp, tmp2;
634
635	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
636
637	tmp = RREG32(mmUVD_CGC_CTRL);
638	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
639	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
640		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
641		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);
642
643	if (sw_mode) {
644		tmp &= ~0x7ffff800;
645		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
646			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
647			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
648	} else {
649		tmp |= 0x7ffff800;
650		tmp2 = 0;
651	}
652
653	WREG32(mmUVD_CGC_CTRL, tmp);
654	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
655}
656
657static bool uvd_v4_2_is_idle(void *handle)
658{
659	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
660
661	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
662}
663
664static int uvd_v4_2_wait_for_idle(void *handle)
665{
666	unsigned i;
667	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
668
669	for (i = 0; i < adev->usec_timeout; i++) {
670		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
671			return 0;
672	}
673	return -ETIMEDOUT;
674}
675
676static int uvd_v4_2_soft_reset(void *handle)
677{
678	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
679
680	uvd_v4_2_stop(adev);
681
682	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
683			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
684	mdelay(5);
685
686	return uvd_v4_2_start(adev);
687}
688
689static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
690					struct amdgpu_irq_src *source,
691					unsigned type,
692					enum amdgpu_interrupt_state state)
693{
694	// TODO
695	return 0;
696}
697
698static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
699				      struct amdgpu_irq_src *source,
700				      struct amdgpu_iv_entry *entry)
701{
702	DRM_DEBUG("IH: UVD TRAP\n");
703	amdgpu_fence_process(&adev->uvd.inst->ring);
704	return 0;
705}
706
707static int uvd_v4_2_set_clockgating_state(void *handle,
708					  enum amd_clockgating_state state)
709{
710	return 0;
711}
712
713static int uvd_v4_2_set_powergating_state(void *handle,
714					  enum amd_powergating_state state)
715{
716	/* This doesn't actually powergate the UVD block.
717	 * That's done in the dpm code via the SMC.  This
718	 * just re-inits the block as necessary.  The actual
719	 * gating still happens in the dpm code.  We should
720	 * revisit this when there is a cleaner line between
721	 * the smc and the hw blocks
722	 */
723	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
724
725	if (state == AMD_PG_STATE_GATE) {
726		uvd_v4_2_stop(adev);
727		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
728			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
729				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
730				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
731							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
732							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
733				mdelay(20);
734			}
735		}
736		return 0;
737	} else {
738		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
739			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
740				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
741				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
742						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
743						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
744				mdelay(30);
745			}
746		}
747		return uvd_v4_2_start(adev);
748	}
749}
750
751static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
752	.name = "uvd_v4_2",
753	.early_init = uvd_v4_2_early_init,
754	.late_init = NULL,
755	.sw_init = uvd_v4_2_sw_init,
756	.sw_fini = uvd_v4_2_sw_fini,
757	.hw_init = uvd_v4_2_hw_init,
758	.hw_fini = uvd_v4_2_hw_fini,
759	.suspend = uvd_v4_2_suspend,
760	.resume = uvd_v4_2_resume,
761	.is_idle = uvd_v4_2_is_idle,
762	.wait_for_idle = uvd_v4_2_wait_for_idle,
763	.soft_reset = uvd_v4_2_soft_reset,
764	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
765	.set_powergating_state = uvd_v4_2_set_powergating_state,
766};
767
768static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
769	.type = AMDGPU_RING_TYPE_UVD,
770	.align_mask = 0xf,
771	.support_64bit_ptrs = false,
772	.no_user_fence = true,
773	.get_rptr = uvd_v4_2_ring_get_rptr,
774	.get_wptr = uvd_v4_2_ring_get_wptr,
775	.set_wptr = uvd_v4_2_ring_set_wptr,
776	.parse_cs = amdgpu_uvd_ring_parse_cs,
777	.emit_frame_size =
778		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
779	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
780	.emit_ib = uvd_v4_2_ring_emit_ib,
781	.emit_fence = uvd_v4_2_ring_emit_fence,
782	.test_ring = uvd_v4_2_ring_test_ring,
783	.test_ib = amdgpu_uvd_ring_test_ib,
784	.insert_nop = uvd_v4_2_ring_insert_nop,
785	.pad_ib = amdgpu_ring_generic_pad_ib,
786	.begin_use = amdgpu_uvd_ring_begin_use,
787	.end_use = amdgpu_uvd_ring_end_use,
788};
789
790static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
791{
792	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
793}
794
795static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
796	.set = uvd_v4_2_set_interrupt_state,
797	.process = uvd_v4_2_process_interrupt,
798};
799
800static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
801{
802	adev->uvd.inst->irq.num_types = 1;
803	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
804}
805
806const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
807{
808		.type = AMD_IP_BLOCK_TYPE_UVD,
809		.major = 4,
810		.minor = 2,
811		.rev = 0,
812		.funcs = &uvd_v4_2_ip_funcs,
813};
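
For reference, a minimal sketch of how an IP block version such as uvd_v4_2_ip_block is consumed: the ASIC init code registers it with the device before the amd_ip_funcs callbacks above are invoked. The call site named below (the CIK init path, e.g. cik.c) is an assumption for illustration.

	/* assumed CIK init path: register the UVD 4.2 IP block with the device */
	amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);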