/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
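/*
 * Version 1.130.16 packed as (major << 24) | (minor << 16) | (revision << 8),
 * matching the layout this file compares against adev->uvd.fw_version in
 * uvd_v6_0_enc_support() below; the low byte is unused here.
 */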

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_POLARIS12) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
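	/*
	 * Each message below starts with its size in bytes followed by a
	 * type code: 0x18 bytes of session info, 0x14 bytes of task info,
	 * then an 8-byte initialize op. This layout is inferred from the
	 * dword stream itself, not from a published spec.
	 */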
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: if true, submit the IB directly instead of via the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

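	/*
	 * Direct submission bypasses the GPU scheduler and is what the
	 * hardware tests use; the scheduled path reuses the VCE entity,
	 * apparently because UVD ENC has no entity of its own in this
	 * version (an observation from the code, not documented behaviour).
	 */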
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

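	/*
	 * Open an encode session and immediately close it again; if the
	 * ENC block is alive the firmware answers both messages and
	 * signals the fence of the destroy message.
	 */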
	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.ring_enc[i].funcs = NULL;

		adev->uvd.irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	} else {
		struct drm_sched_rq *rq;
		ring = &adev->uvd.ring_enc[0];
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
					  rq, amdgpu_sched_jobs, NULL);
		if (r) {
			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
			return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
			if (r)
				return r;
		}
	}

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);

		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.ring_enc[i];
			ring->ready = true;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->ready = false;
				goto done;
			}
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

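	/*
	 * The VCPU sees three windows inside the UVD BO: window 0 holds
	 * the firmware image, window 1 the heap, and window 2 the stack
	 * plus one context save area per session handle. The offsets are
	 * in units of 8 bytes, hence the >> 3 below.
	 */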
	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

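	/*
	 * Poll for the VCPU report bit (bit 1 of UVD_STATUS): up to ten
	 * attempts, each waiting up to one second (100 * 10 ms), with a
	 * VCPU soft reset between failed attempts.
	 */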
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the write-back address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

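	/*
	 * GPCOM_VCPU_CMD 0 appears to write the fence value to the given
	 * address and 2 to raise the trap interrupt; this is inferred from
	 * how the commands are paired here, not from a register spec.
	 */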
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

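	/*
	 * Command 0xC appears to be a masked register wait: poll
	 * VM_INVALIDATE_REQUEST until the bit for this vmid clears. This
	 * is an inference from the operand layout, mirroring the memory
	 * wait in uvd_v6_0_ring_emit_pipeline_sync() below.
	 */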
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

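	/*
	 * Command 0xE appears to be a masked memory wait: block until the
	 * fence value at addr reaches seq (again inferred from the operand
	 * layout rather than documented).
	 */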
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
		unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

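/*
 * All UVD_STATUS busy bits except bit 1 (0xfd = 0b11111101), the bit
 * uvd_v6_0_start() checks as the VCPU-alive report.
 */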
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);
		/* the VCPU clock can only be gated when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

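	/*
	 * Clearing a *_MODE bit hands gating control for that sub-block to
	 * the corresponding UVD_CGC_GATE bit, i.e. software-controlled
	 * gating; this reading of the MODE bits is an assumption.
	 */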
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

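	/*
	 * The low 12 bits of ixUVD_CGC_MEM_CTRL are treated here as the
	 * per-memory clock gating enables (an assumption based on the
	 * blanket 0xfff mask), with DYN_CLOCK_MODE switching the block
	 * to dynamic, medium-grain clock gating.
	 */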
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

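/*
 * emit_frame_size in the tables below is the worst-case number of ring
 * dwords a single submission may emit through the callbacks above; the
 * amdgpu ring core appears to use it to budget ring space up front.
 */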
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + 6 + /* hdp flush / invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp flush / invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.irq.num_types = 1;

	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};