/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	return amdgpu_uvd_resume(adev);
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

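	/*
	 * Program the semaphore timeout registers through ring packets
	 * (PACKET0 writes) now that the VCPU is up; 0xFFFFF is presumably
	 * the maximum timeout value.
	 */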
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}

static int uvd_v5_0_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shut down the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

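	/*
	 * The VCPU sees three back-to-back cache windows: the firmware
	 * image, the heap, and the stack plus per-session state. The
	 * OFFSET registers appear to take 8-byte units, hence the >> 3
	 * applied to the byte offsets.
	 */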
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

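	/* hand the gfx address/tiling configuration to the UDEC blocks */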
	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

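	/*
	 * Poll UVD_STATUS until bit 1 is set (the VCPU reporting in).
	 * Each of the ten attempts waits up to a second; between attempts
	 * the VCPU is put through another soft-reset cycle.
	 */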
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

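	/* start with an empty ring: the write pointer mirrors the just-cleared read pointer */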
	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

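	/* the group above wrote the fence value; this second command group emits the trap */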
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

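/*
 * NOPs are emitted as two-dword PACKET0(mmUVD_NO_OP) pairs, so both the
 * current write pointer and the requested count have to stay even; the
 * WARN_ON below guards that assumption.
 */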
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

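/*
 * Setting a bit in UVD_CGC_GATE/UVD_SUVD_CGC_GATE appears to allow the
 * clock of the corresponding sub-block to be gated; clearing everything
 * (the !enable path) keeps all clocks running.
 */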
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK);
		/* only when powergating is enabled can we gate the VCPU clock */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

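	/*
	 * The low 12 bits of ixUVD_CGC_MEM_CTRL appear to be per-memory
	 * clock-gating enables; DYN_CLOCK_MODE in UVD_CGC_CTRL switches
	 * the block to dynamic gating.
	 */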
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE)
		uvd_v5_0_stop(adev);
	else
		ret = uvd_v5_0_start(adev);

	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.prepare_suspend = uvd_v5_0_prepare_suspend,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};