/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return 0;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

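	/*
	 * Program the semaphore timeout registers through the ring.  The
	 * 0xFFFFF timeout counts below are carried over from the original
	 * radeon UVD bring-up code; the tick unit is not documented here.
	 */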
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	return 0;
}

static int uvd_v4_2_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
	/* initialize UVD memory controller */
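	/*
	 * 0x203108 is a magic value inherited from the original UVD
	 * bring-up code; it presumably packs the write-clean timer and
	 * the coherency/swap enables of UVD_LMI_CTRL, but the individual
	 * fields are not decoded here.
	 */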
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

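	/*
	 * Poll for the VCPU to come up: bit 1 of UVD_STATUS is assumed to
	 * be the "VCPU running" flag.  If it never sets, soft-reset the
	 * VCPU and try again, up to 10 times.
	 */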
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
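	/*
	 * 0x11010101 is the same magic UVD_RBC_RB_CNTL value used in
	 * uvd_v4_2_stop(); it presumably sets the no-fetch/no-update bits
	 * so the RBC stops consuming the ring.  The per-field breakdown
	 * is not decoded here.
	 */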
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
			(0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

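	/*
	 * Wait for the VCPU to settle; this polls the same UVD_STATUS bit
	 * that uvd_v4_2_start() waits on after booting the VCPU.
	 */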
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

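	/*
	 * The 0xf mask below presumably covers the read/write "clean"
	 * flags of UVD_LMI_STATUS, i.e. wait for outstanding LMI traffic
	 * to drain.
	 */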
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

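	/*
	 * 0x240 is assumed to select the UMC read/write clean flags of
	 * UVD_LMI_STATUS, i.e. wait until the stalled UMC has drained.
	 */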
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

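	/*
	 * Raw register offset; this pairs with the "set uvd busy" write in
	 * uvd_v4_2_start(), so 0x3D49 is presumably mmUVD_STATUS and this
	 * clears the busy bit again.
	 */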
	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

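	/* fence: sequence number, 40-bit GPU address, then VCPU command 0 */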
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

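	/* trap: zeroed address, then VCPU command 2 */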
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

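	/*
	 * The VCPU sees three consecutive windows in the UVD BO, all
	 * programmed in 8-byte units (hence the >> 3):
	 *
	 *   | firmware | heap | stack + per-session images |
	 */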
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

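	/*
	 * sw_mode hands dynamic clocking to software: the ramp enables in
	 * UVD_CGC_CTRL2 are turned on and what appear to be the per-IP
	 * gating bits (0x7ffff800) in UVD_CGC_CTRL are masked off;
	 * otherwise the hardware keeps control.
	 */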
	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.prepare_suspend = uvd_v4_2_prepare_suspend,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};