/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

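/**
 * uvd_v5_0_early_init - set up ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the UVD ring and interrupt handler functions early,
 * before any software or hardware state is created.
 */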
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

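/**
 * uvd_v5_0_sw_init - sw init for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt (src id 124), set up the UVD
 * firmware and memory, then initialize the UVD ring.
 */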
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

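/**
 * uvd_v5_0_sw_fini - sw fini for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and tear down the software state again.
 */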
static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}

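/**
 * uvd_v5_0_suspend - suspend UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware, gate the clocks and suspend the UVD software state.
 */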
static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

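/**
 * uvd_v5_0_resume - resume UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the UVD software state and reinitialize the hardware.
 */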
static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory ID (not used by UVD)
 * @ctx_switch: context switch flag (not used by UVD)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

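/**
 * uvd_v5_0_is_idle - check UVD idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Check the SRBM status register to see whether the UVD block is idle.
 */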
static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

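/**
 * uvd_v5_0_wait_for_idle - wait for UVD to idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll the SRBM status register until the UVD busy bit clears,
 * or return -ETIMEDOUT after adev->usec_timeout iterations.
 */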
static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

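/**
 * uvd_v5_0_soft_reset - soft reset UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop UVD, assert the SRBM soft reset for the block and start it again.
 */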
static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

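/**
 * uvd_v5_0_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a no-op (TODO).
 */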
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

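/**
 * uvd_v5_0_process_interrupt - handle a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process fences on the UVD ring when the trap interrupt fires.
 */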
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

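/**
 * uvd_v5_0_enable_clock_gating - enable/disable UVD clock gates
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the gates
 *
 * Program the UVD_CGC_GATE and UVD_SUVD_CGC_GATE registers to open
 * or close the clock gates for the UVD sub-blocks.
 */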
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK     |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK);
		/* the VCPU clock can only be gated when power gating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

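/**
 * uvd_v5_0_set_sw_clock_gating - configure dynamic clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Program the clock gating delay and off-delay timers and select
 * dynamic clock gating mode for all UVD and SUVD sub-blocks.
 */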
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

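/**
 * uvd_v5_0_enable_mgcg - enable/disable UVD medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the memory and dynamic clock gating bits, honouring the
 * AMD_CG_SUPPORT_UVD_MGCG flag.
 */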
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

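/**
 * uvd_v5_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to set
 *
 * When gating, wait for the block to idle first, then enable or
 * disable the clock gates and reprogram the software clock gating
 * controls.
 */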
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
/*		uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

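/**
 * uvd_v5_0_set_powergating_state - set UVD power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to set
 *
 * Stop or (re)start the UVD block depending on the requested state.
 */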
static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	}

	return uvd_v5_0_start(adev);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};