/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
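/*
 * The version is packed as (major << 24) | (minor << 16) | (rev << 8), so
 * FW_1_130_16 is firmware 1.130.16, the minimum version for which
 * uvd_v6_0_enc_support() reports encode support.
 */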

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

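	/* a single END command is enough to make the read pointer advance */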
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

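	/* scratch BO in VRAM; its GPU address backs the session buffer the
	 * create/destroy messages point at */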
	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

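	/* program the semaphore timeout registers to their maximum through
	 * the ring itself */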
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

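	/* the VCPU cache is carved into three windows: firmware image, heap,
	 * and per-session stack data */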
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

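	/* poll UVD_STATUS for the boot-complete bit (bit 1), resetting the
	 * VCPU between attempts */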
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
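	/*
	 * Register writes are routed through the VCPU: DATA0 takes the byte
	 * offset of the register, DATA1 the value, and GPCOM command 0x8
	 * performs the write.
	 */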
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

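	/* wait for the invalidate request bit of this VMID to read back as
	 * zero again before continuing */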
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

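	/* poll the fence memory at addr until it reaches sync_seq */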
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

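	/* NOPs are emitted as PACKET0/payload pairs, hence the even
	 * wptr/count requirement */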
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
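	/* point the engine at the new page table base (as a page frame
	 * number) and then flush its TLB */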
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

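	/*
	 * src_id 124 is VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE; 119 and 120
	 * are the two VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP sources registered
	 * in uvd_v6_0_sw_init().
	 */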
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König <christian.koenig@amd.com>
23 */
24
25#include <linux/firmware.h>
26
27#include "amdgpu.h"
28#include "amdgpu_uvd.h"
29#include "vid.h"
30#include "uvd/uvd_6_0_d.h"
31#include "uvd/uvd_6_0_sh_mask.h"
32#include "oss/oss_2_0_d.h"
33#include "oss/oss_2_0_sh_mask.h"
34#include "smu/smu_7_1_3_d.h"
35#include "smu/smu_7_1_3_sh_mask.h"
36#include "bif/bif_5_1_d.h"
37#include "gmc/gmc_8_1_d.h"
38#include "vi.h"
39#include "ivsrcid/ivsrcid_vislands30.h"
40
41/* Polaris10/11/12 firmware version */
42#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
43
44static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
45static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
46
47static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
48static int uvd_v6_0_start(struct amdgpu_device *adev);
49static void uvd_v6_0_stop(struct amdgpu_device *adev);
50static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
51static int uvd_v6_0_set_clockgating_state(void *handle,
52 enum amd_clockgating_state state);
53static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
54 bool enable);
55
56/**
57* uvd_v6_0_enc_support - get encode support status
58*
59* @adev: amdgpu_device pointer
60*
61* Returns the current hardware encode support status
62*/
63static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
64{
65 return ((adev->asic_type >= CHIP_POLARIS10) &&
66 (adev->asic_type <= CHIP_VEGAM) &&
67 (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
68}
69
70/**
71 * uvd_v6_0_ring_get_rptr - get read pointer
72 *
73 * @ring: amdgpu_ring pointer
74 *
75 * Returns the current hardware read pointer
76 */
77static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
78{
79 struct amdgpu_device *adev = ring->adev;
80
81 return RREG32(mmUVD_RBC_RB_RPTR);
82}
83
84/**
85 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
86 *
87 * @ring: amdgpu_ring pointer
88 *
89 * Returns the current hardware enc read pointer
90 */
91static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
92{
93 struct amdgpu_device *adev = ring->adev;
94
95 if (ring == &adev->uvd.inst->ring_enc[0])
96 return RREG32(mmUVD_RB_RPTR);
97 else
98 return RREG32(mmUVD_RB_RPTR2);
99}
100/**
101 * uvd_v6_0_ring_get_wptr - get write pointer
102 *
103 * @ring: amdgpu_ring pointer
104 *
105 * Returns the current hardware write pointer
106 */
107static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
108{
109 struct amdgpu_device *adev = ring->adev;
110
111 return RREG32(mmUVD_RBC_RB_WPTR);
112}
113
114/**
115 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
116 *
117 * @ring: amdgpu_ring pointer
118 *
119 * Returns the current hardware enc write pointer
120 */
121static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
122{
123 struct amdgpu_device *adev = ring->adev;
124
125 if (ring == &adev->uvd.inst->ring_enc[0])
126 return RREG32(mmUVD_RB_WPTR);
127 else
128 return RREG32(mmUVD_RB_WPTR2);
129}
130
131/**
132 * uvd_v6_0_ring_set_wptr - set write pointer
133 *
134 * @ring: amdgpu_ring pointer
135 *
136 * Commits the write pointer to the hardware
137 */
138static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
139{
140 struct amdgpu_device *adev = ring->adev;
141
142 WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
143}
144
145/**
146 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
147 *
148 * @ring: amdgpu_ring pointer
149 *
150 * Commits the enc write pointer to the hardware
151 */
152static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
153{
154 struct amdgpu_device *adev = ring->adev;
155
156 if (ring == &adev->uvd.inst->ring_enc[0])
157 WREG32(mmUVD_RB_WPTR,
158 lower_32_bits(ring->wptr));
159 else
160 WREG32(mmUVD_RB_WPTR2,
161 lower_32_bits(ring->wptr));
162}
163
164/**
165 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
166 *
167 * @ring: the engine to test on
168 *
169 */
170static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
171{
172 struct amdgpu_device *adev = ring->adev;
173 uint32_t rptr;
174 unsigned i;
175 int r;
176
177 r = amdgpu_ring_alloc(ring, 16);
178 if (r)
179 return r;
180
181 rptr = amdgpu_ring_get_rptr(ring);
182
183 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
184 amdgpu_ring_commit(ring);
185
186 for (i = 0; i < adev->usec_timeout; i++) {
187 if (amdgpu_ring_get_rptr(ring) != rptr)
188 break;
189 udelay(1);
190 }
191
192 if (i >= adev->usec_timeout)
193 r = -ETIMEDOUT;
194
195 return r;
196}
197
198/**
199 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
200 *
201 * @ring: ring we should submit the msg to
202 * @handle: session handle to use
203 * @bo: amdgpu object for which we query the offset
204 * @fence: optional fence to return
205 *
206 * Open up a stream for HW test
207 */
208static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
209 struct amdgpu_bo *bo,
210 struct dma_fence **fence)
211{
212 const unsigned ib_size_dw = 16;
213 struct amdgpu_job *job;
214 struct amdgpu_ib *ib;
215 struct dma_fence *f = NULL;
216 uint64_t addr;
217 int i, r;
218
219 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
220 AMDGPU_IB_POOL_DIRECT, &job);
221 if (r)
222 return r;
223
224 ib = &job->ibs[0];
225 addr = amdgpu_bo_gpu_offset(bo);
226
227 ib->length_dw = 0;
228 ib->ptr[ib->length_dw++] = 0x00000018;
229 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
230 ib->ptr[ib->length_dw++] = handle;
231 ib->ptr[ib->length_dw++] = 0x00010000;
232 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
233 ib->ptr[ib->length_dw++] = addr;
234
235 ib->ptr[ib->length_dw++] = 0x00000014;
236 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
237 ib->ptr[ib->length_dw++] = 0x0000001c;
238 ib->ptr[ib->length_dw++] = 0x00000001;
239 ib->ptr[ib->length_dw++] = 0x00000000;
240
241 ib->ptr[ib->length_dw++] = 0x00000008;
242 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
243
244 for (i = ib->length_dw; i < ib_size_dw; ++i)
245 ib->ptr[i] = 0x0;
246
247 r = amdgpu_job_submit_direct(job, ring, &f);
248 if (r)
249 goto err;
250
251 if (fence)
252 *fence = dma_fence_get(f);
253 dma_fence_put(f);
254 return 0;
255
256err:
257 amdgpu_job_free(job);
258 return r;
259}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Open and close a test session and wait for the resulting fence.
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}
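
/*
 * The IB test exercises a full session round trip: the create message
 * opens session handle 1, the destroy message closes it and returns a
 * fence, and that fence only signals once the firmware has consumed
 * both IBs:
 *
 *	create(handle = 1) -> destroy(handle = 1, &fence) -> wait(fence)
 */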

static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = ip_block->adev;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	return r;
}
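
/*
 * Ordering note for sw_init above: the interrupt source IDs are
 * registered first, amdgpu_uvd_sw_init() then loads the firmware
 * (which fixes uvd.fw_version and thus the ENC support decision),
 * the decode ring is initialized, amdgpu_uvd_resume() sets up the
 * firmware BO, and only then are the ENC rings brought up.
 */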

static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, r;
	struct amdgpu_device *adev = ip_block->adev;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}
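
/*
 * The ring writes in hw_init above program the semaphore wait/signal
 * timeout registers and then enable the semaphore block (the value 3
 * written to UVD_SEMA_CNTL presumably sets its enable bits). They are
 * emitted through the ring rather than with WREG32 because, as noted
 * in uvd_v6_0_ring_emit_hdp_flush(), the firmware seemingly dislikes
 * host register writes once the VCPU is running.
 */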

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v6_0_hw_fini(ip_block);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_uvd_resume(ip_block->adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(ip_block);
}
597/**
598 * uvd_v6_0_mc_resume - memory controller programming
599 *
600 * @adev: amdgpu_device pointer
601 *
602 * Let the UVD memory controller know it's offsets
603 */
604static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
605{
606 uint64_t offset;
607 uint32_t size;
608
609 /* program memory controller bits 0-27 */
610 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
611 lower_32_bits(adev->uvd.inst->gpu_addr));
612 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
613 upper_32_bits(adev->uvd.inst->gpu_addr));
614
615 offset = AMDGPU_UVD_FIRMWARE_OFFSET;
616 size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
617 WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
618 WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
619
620 offset += size;
621 size = AMDGPU_UVD_HEAP_SIZE;
622 WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
623 WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
624
625 offset += size;
626 size = AMDGPU_UVD_STACK_SIZE +
627 (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
628 WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
629 WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
630
631 WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
632 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
633 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
634
635 WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
636}
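
/*
 * Resulting VCPU cache layout inside the UVD BO (offsets are programmed
 * in units of 8 bytes, hence the >> 3):
 *
 *	window 0: firmware image   at AMDGPU_UVD_FIRMWARE_OFFSET
 *	window 1: heap             of AMDGPU_UVD_HEAP_SIZE
 *	window 2: stack + sessions (AMDGPU_UVD_STACK_SIZE plus
 *	          AMDGPU_UVD_SESSION_SIZE per supported handle)
 */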

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
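
/*
 * Boot handshake used above: after UVD_SOFT_RESET is released, UVD_STATUS
 * is polled and the "status & 2" test appears to check the VCPU report
 * bit the firmware sets once it is up. Each of the up-to-ten attempts
 * polls for roughly a second (100 * 10 ms) before the VCPU is reset and
 * the wait restarts.
 */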

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
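
/*
 * ENC fence packet layout, five dwords in total (matching the "5 + 5"
 * accounting in uvd_v6_0_enc_ring_vm_funcs.emit_frame_size below):
 *
 *	HEVC_ENC_CMD_FENCE, addr_lo, addr_hi, seq, HEVC_ENC_CMD_TRAP
 */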

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
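
/*
 * Decode-side IB execution is plain register programming through PACKET0:
 * the VMID, the 64-bit IB address and the size in dwords each go to a
 * dedicated UVD register, eight ring dwords in total (hence
 * .emit_ib_size = 8 in the ring funcs below).
 */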

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}
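
/*
 * The trailing GPCOM command selects what the VCPU does with DATA0/DATA1:
 * 0x8 in emit_wreg above appears to mean "write DATA1 to the register
 * whose byte address is in DATA0", while 0xC here seems to make the VCPU
 * poll that register until the bits selected by GP_SCRATCH8 (the
 * per-VMID invalidate bit) match DATA1, i.e. until the TLB flush for
 * this VMID has completed. This reading is inferred from usage.
 */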

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
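
/*
 * Each decode NOP is a two-dword PACKET0(mmUVD_NO_OP) pair, which is why
 * the WARN_ON above insists that both the current write pointer and the
 * requested count stay even: padding must never split a packet in half.
 */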

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(adev))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}
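
/*
 * The numeric src_ids decoded above match the ivsrcid constants used at
 * registration time in sw_init: 124 is
 * VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, and 119/120 are the two
 * general purpose ENC sources starting at
 * VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP.
 */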

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);
		/* Only when powergating is enabled can we gate the clock to the VCPU. */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
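
/*
 * MGCG is two knobs here: the low 12 bits of the indexed UVD_CGC_MEM_CTRL
 * register (reached through the UVD context bus, apparently gating the
 * internal memory clocks) and DYN_CLOCK_MODE in UVD_CGC_CTRL, which
 * switches the block to dynamic clocking. Both are set on enable and
 * cleared on disable.
 */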

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ip_block *ip_block;
	bool enable = (state == AMD_CG_STATE_GATE);

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
	if (!ip_block)
		return -EINVAL;

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(ip_block))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.prepare_suspend = uvd_v6_0_prepare_suspend,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};
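
/*
 * A note on .emit_frame_size: the totals are worst-case dword budgets
 * per frame so ring space can be reserved up front. For the decode
 * rings that is 6 dwords for the (here skipped) HDP flush slot, 10 for
 * the pipeline sync and 14 per fence emission; the VM variant below
 * additionally budgets for the TLB flush sequence.
 */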

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};