/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L

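/* Vega20 carries two UVD instances; one or both may be fused off (harvested). */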
#define UVD7_MAX_HW_INSTANCES_VEGA20 2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

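	/* With doorbells enabled (the SR-IOV path) the current wptr lives in
	 * a CPU-visible buffer rather than in a register.
	 */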
	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

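	/* The register-based ring test is skipped under SR-IOV, where the VF
	 * should not access UVD registers directly.
	 */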
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

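	/* Each command block below starts with its size in bytes followed by
	 * a command id; the remaining dwords are the command payload.
	 */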
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
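		/* Record fused-off (harvested) instances in the bitmask so
		 * the rest of the driver can skip them.
		 */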
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only the first encoding ring is
				 * used for SR-IOV, so set an unused location
				 * for the other rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
				goto done;
			}

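			/* Program the semaphore wait/signal timeouts and
			 * clear any stale timeout status before enabling
			 * semaphores.
			 */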
			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_prepare_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_uvd_prepare_suspend(adev);
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

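	/* Three VCPU cache windows map, in order, the firmware image, the
	 * heap, and the stack/session area.
	 */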
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
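	/* Poll for the MMSCH handshake value; 1000 iterations at 10us each
	 * gives the scheduler roughly 10ms before we give up.
	 */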
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

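	/* Under SR-IOV the host's MMSCH performs the register programming;
	 * build a command table describing the writes instead of issuing
	 * them directly.
	 */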
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}
		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;
	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
			 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			     UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			     (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			     UVD_LMI_CTRL__REQ_MODE_MASK |
			     0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
			     UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

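		/* Wait for the VCPU to report ready (UVD_STATUS bit 1),
		 * retrying with a VCPU soft reset up to 10 times.
		 */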
		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
				 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
			     (upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			     upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
			     lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
			 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

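	/* The sequence number goes to UVD_CONTEXT_ID and the fence address to
	 * the GPCOM data registers; GPCOM command 0 appears to emit the fence
	 * and command 2 the trap.
	 */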
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @job: which job this ib is in
 * @ib: which IB to patch
 *
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

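	/* The IB encodes register offsets for instance 0; rebase each
	 * register/value pair onto the second instance's register space.
	 */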
	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_ib_set_value(ib, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

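	/* Each NOP is a two-dword register write to UVD_NO_OP, hence the
	 * even-count requirement above.
	 */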
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
	     AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124: /* UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119: /* UVD_7_0__SRCID__UVD_ENC_GEN_PURP, enc ring 0 */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120: /* enc ring 1 */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}


static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

1785const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1786 .name = "uvd_v7_0",
1787 .early_init = uvd_v7_0_early_init,
1788 .late_init = NULL,
1789 .sw_init = uvd_v7_0_sw_init,
1790 .sw_fini = uvd_v7_0_sw_fini,
1791 .hw_init = uvd_v7_0_hw_init,
1792 .hw_fini = uvd_v7_0_hw_fini,
1793 .prepare_suspend = uvd_v7_0_prepare_suspend,
1794 .suspend = uvd_v7_0_suspend,
1795 .resume = uvd_v7_0_resume,
1796 .is_idle = NULL /* uvd_v7_0_is_idle */,
1797 .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1798 .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1799 .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1800 .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1801 .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1802 .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1803 .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1804};
1805
1806static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1807 .type = AMDGPU_RING_TYPE_UVD,
1808 .align_mask = 0xf,
1809 .support_64bit_ptrs = false,
1810 .no_user_fence = true,
1811 .get_rptr = uvd_v7_0_ring_get_rptr,
1812 .get_wptr = uvd_v7_0_ring_get_wptr,
1813 .set_wptr = uvd_v7_0_ring_set_wptr,
1814 .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
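	/*
	 * Worst-case dword budget for one frame, derived from the emit
	 * helpers below: uvd_v7_0_ring_emit_wreg() costs 6 dwords and
	 * uvd_v7_0_ring_emit_reg_wait() costs 8, the VM flush tail is one
	 * extra reg_wait, and each fence is seven register/value pairs
	 * (14 dwords).
	 */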
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
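	/*
	 * Enc command dword costs, per the enc emit helpers below: a
	 * register write is 3 dwords and a reg_wait is 4, the VM flush
	 * tail is one extra reg_wait, each fence is 5 dwords (cmd, addr
	 * lo/hi, seq, trap) and insert_end is a single HEVC_ENC_CMD_END.
	 */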
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
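		/* one IRQ source per enc ring plus the decode system-message trap */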
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25
26#include "amdgpu.h"
27#include "amdgpu_uvd.h"
28#include "soc15.h"
29#include "soc15d.h"
30#include "soc15_common.h"
31#include "mmsch_v1_0.h"
32
33#include "uvd/uvd_7_0_offset.h"
34#include "uvd/uvd_7_0_sh_mask.h"
35#include "vce/vce_4_0_offset.h"
36#include "vce/vce_4_0_default.h"
37#include "vce/vce_4_0_sh_mask.h"
38#include "nbif/nbif_6_1_offset.h"
39#include "hdp/hdp_4_0_offset.h"
40#include "mmhub/mmhub_1_0_offset.h"
41#include "mmhub/mmhub_1_0_sh_mask.h"
42#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
43
44#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
45#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
46//UVD_PG0_CC_UVD_HARVESTING
47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
48#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
49
50#define UVD7_MAX_HW_INSTANCES_VEGA20 2
51
52static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
53static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
54static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
55static int uvd_v7_0_start(struct amdgpu_device *adev);
56static void uvd_v7_0_stop(struct amdgpu_device *adev);
57static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
58
59static int amdgpu_ih_clientid_uvds[] = {
60 SOC15_IH_CLIENTID_UVD,
61 SOC15_IH_CLIENTID_UVD1
62};
63
64/**
65 * uvd_v7_0_ring_get_rptr - get read pointer
66 *
67 * @ring: amdgpu_ring pointer
68 *
69 * Returns the current hardware read pointer
70 */
71static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
72{
73 struct amdgpu_device *adev = ring->adev;
74
75 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
76}
77
78/**
79 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
80 *
81 * @ring: amdgpu_ring pointer
82 *
83 * Returns the current hardware enc read pointer
84 */
85static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
86{
87 struct amdgpu_device *adev = ring->adev;
88
89 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
90 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
91 else
92 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
93}
94
95/**
96 * uvd_v7_0_ring_get_wptr - get write pointer
97 *
98 * @ring: amdgpu_ring pointer
99 *
100 * Returns the current hardware write pointer
101 */
102static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
103{
104 struct amdgpu_device *adev = ring->adev;
105
106 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
107}
108
109/**
110 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
111 *
112 * @ring: amdgpu_ring pointer
113 *
114 * Returns the current hardware enc write pointer
115 */
116static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
117{
118 struct amdgpu_device *adev = ring->adev;
119
120 if (ring->use_doorbell)
121 return adev->wb.wb[ring->wptr_offs];
122
123 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
124 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
125 else
126 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
127}
128
129/**
130 * uvd_v7_0_ring_set_wptr - set write pointer
131 *
132 * @ring: amdgpu_ring pointer
133 *
134 * Commits the write pointer to the hardware
135 */
136static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
137{
138 struct amdgpu_device *adev = ring->adev;
139
140 WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
141}
142
143/**
144 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
145 *
146 * @ring: amdgpu_ring pointer
147 *
148 * Commits the enc write pointer to the hardware
149 */
150static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
151{
152 struct amdgpu_device *adev = ring->adev;
153
154 if (ring->use_doorbell) {
155 /* XXX check if swapping is necessary on BE */
156 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
157 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
158 return;
159 }
160
161 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
162 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
163 lower_32_bits(ring->wptr));
164 else
165 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
166 lower_32_bits(ring->wptr));
167}
168
169/**
170 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
171 *
172 * @ring: the engine to test on
173 *
174 */
175static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
176{
177 struct amdgpu_device *adev = ring->adev;
178 uint32_t rptr;
179 unsigned i;
180 int r;
181
182 if (amdgpu_sriov_vf(adev))
183 return 0;
184
185 r = amdgpu_ring_alloc(ring, 16);
186 if (r)
187 return r;
188
189 rptr = amdgpu_ring_get_rptr(ring);
190
191 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
192 amdgpu_ring_commit(ring);
193
194 for (i = 0; i < adev->usec_timeout; i++) {
195 if (amdgpu_ring_get_rptr(ring) != rptr)
196 break;
197 udelay(1);
198 }
199
200 if (i >= adev->usec_timeout)
201 r = -ETIMEDOUT;
202
203 return r;
204}
205
206/**
207 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
208 *
209 * @adev: amdgpu_device pointer
210 * @ring: ring we should submit the msg to
211 * @handle: session handle to use
212 * @fence: optional fence to return
213 *
214 * Open up a stream for HW test
215 */
216static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
217 struct amdgpu_bo *bo,
218 struct dma_fence **fence)
219{
220 const unsigned ib_size_dw = 16;
221 struct amdgpu_job *job;
222 struct amdgpu_ib *ib;
223 struct dma_fence *f = NULL;
224 uint64_t addr;
225 int i, r;
226
227 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
228 if (r)
229 return r;
230
231 ib = &job->ibs[0];
232 addr = amdgpu_bo_gpu_offset(bo);
233
234 ib->length_dw = 0;
235 ib->ptr[ib->length_dw++] = 0x00000018;
236 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
237 ib->ptr[ib->length_dw++] = handle;
238 ib->ptr[ib->length_dw++] = 0x00000000;
239 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
240 ib->ptr[ib->length_dw++] = addr;
241
242 ib->ptr[ib->length_dw++] = 0x00000014;
243 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
244 ib->ptr[ib->length_dw++] = 0x0000001c;
245 ib->ptr[ib->length_dw++] = 0x00000000;
246 ib->ptr[ib->length_dw++] = 0x00000000;
247
248 ib->ptr[ib->length_dw++] = 0x00000008;
249 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
250
251 for (i = ib->length_dw; i < ib_size_dw; ++i)
252 ib->ptr[i] = 0x0;
253
254 r = amdgpu_job_submit_direct(job, ring, &f);
255 if (r)
256 goto err;
257
258 if (fence)
259 *fence = dma_fence_get(f);
260 dma_fence_put(f);
261 return 0;
262
263err:
264 amdgpu_job_free(job);
265 return r;
266}
267
268/**
269 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
270 *
271 * @adev: amdgpu_device pointer
272 * @ring: ring we should submit the msg to
273 * @handle: session handle to use
274 * @fence: optional fence to return
275 *
276 * Close up a stream for HW test or if userspace failed to do so
277 */
278static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
279 struct amdgpu_bo *bo,
280 struct dma_fence **fence)
281{
282 const unsigned ib_size_dw = 16;
283 struct amdgpu_job *job;
284 struct amdgpu_ib *ib;
285 struct dma_fence *f = NULL;
286 uint64_t addr;
287 int i, r;
288
289 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
290 if (r)
291 return r;
292
293 ib = &job->ibs[0];
294 addr = amdgpu_bo_gpu_offset(bo);
295
296 ib->length_dw = 0;
297 ib->ptr[ib->length_dw++] = 0x00000018;
298 ib->ptr[ib->length_dw++] = 0x00000001;
299 ib->ptr[ib->length_dw++] = handle;
300 ib->ptr[ib->length_dw++] = 0x00000000;
301 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
302 ib->ptr[ib->length_dw++] = addr;
303
304 ib->ptr[ib->length_dw++] = 0x00000014;
305 ib->ptr[ib->length_dw++] = 0x00000002;
306 ib->ptr[ib->length_dw++] = 0x0000001c;
307 ib->ptr[ib->length_dw++] = 0x00000000;
308 ib->ptr[ib->length_dw++] = 0x00000000;
309
310 ib->ptr[ib->length_dw++] = 0x00000008;
311 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
312
313 for (i = ib->length_dw; i < ib_size_dw; ++i)
314 ib->ptr[i] = 0x0;
315
316 r = amdgpu_job_submit_direct(job, ring, &f);
317 if (r)
318 goto err;
319
320 if (fence)
321 *fence = dma_fence_get(f);
322 dma_fence_put(f);
323 return 0;
324
325err:
326 amdgpu_job_free(job);
327 return r;
328}
329
330/**
331 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
332 *
333 * @ring: the engine to test on
334 *
335 */
336static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
337{
338 struct dma_fence *fence = NULL;
339 struct amdgpu_bo *bo = NULL;
340 long r;
341
342 r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
343 AMDGPU_GEM_DOMAIN_VRAM,
344 &bo, NULL, NULL);
345 if (r)
346 return r;
347
348 r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
349 if (r)
350 goto error;
351
352 r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
353 if (r)
354 goto error;
355
356 r = dma_fence_wait_timeout(fence, false, timeout);
357 if (r == 0)
358 r = -ETIMEDOUT;
359 else if (r > 0)
360 r = 0;
361
362error:
363 dma_fence_put(fence);
364 amdgpu_bo_unreserve(bo);
365 amdgpu_bo_unref(&bo);
366 return r;
367}
368
369static int uvd_v7_0_early_init(void *handle)
370{
371 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
372
373 if (adev->asic_type == CHIP_VEGA20) {
374 u32 harvest;
375 int i;
376
377 adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
378 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
379 harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
380 if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
381 adev->uvd.harvest_config |= 1 << i;
382 }
383 }
384 if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
385 AMDGPU_UVD_HARVEST_UVD1))
386 /* both instances are harvested, disable the block */
387 return -ENOENT;
388 } else {
389 adev->uvd.num_uvd_inst = 1;
390 }
391
392 if (amdgpu_sriov_vf(adev))
393 adev->uvd.num_enc_rings = 1;
394 else
395 adev->uvd.num_enc_rings = 2;
396 uvd_v7_0_set_ring_funcs(adev);
397 uvd_v7_0_set_enc_ring_funcs(adev);
398 uvd_v7_0_set_irq_funcs(adev);
399
400 return 0;
401}
402
403static int uvd_v7_0_sw_init(void *handle)
404{
405 struct amdgpu_ring *ring;
406
407 int i, j, r;
408 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
409
410 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
411 if (adev->uvd.harvest_config & (1 << j))
412 continue;
413 /* UVD TRAP */
414 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
415 if (r)
416 return r;
417
418 /* UVD ENC TRAP */
419 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
420 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
421 if (r)
422 return r;
423 }
424 }
425
426 r = amdgpu_uvd_sw_init(adev);
427 if (r)
428 return r;
429
430 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
431 const struct common_firmware_header *hdr;
432 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
433 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
434 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
435 adev->firmware.fw_size +=
436 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
437
438 if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
439 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
440 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
441 adev->firmware.fw_size +=
442 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
443 }
444 DRM_INFO("PSP loading UVD firmware\n");
445 }
446
447 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
448 if (adev->uvd.harvest_config & (1 << j))
449 continue;
450 if (!amdgpu_sriov_vf(adev)) {
451 ring = &adev->uvd.inst[j].ring;
452 sprintf(ring->name, "uvd_%d", ring->me);
453 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
454 if (r)
455 return r;
456 }
457
458 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
459 ring = &adev->uvd.inst[j].ring_enc[i];
460 sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
461 if (amdgpu_sriov_vf(adev)) {
462 ring->use_doorbell = true;
463
464 /* currently only use the first enconding ring for
465 * sriov, so set unused location for other unused rings.
466 */
467 if (i == 0)
468 ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
469 else
470 ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
471 }
472 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
473 if (r)
474 return r;
475 }
476 }
477
478 r = amdgpu_uvd_resume(adev);
479 if (r)
480 return r;
481
482 r = amdgpu_uvd_entity_init(adev);
483 if (r)
484 return r;
485
486 r = amdgpu_virt_alloc_mm_table(adev);
487 if (r)
488 return r;
489
490 return r;
491}
492
493static int uvd_v7_0_sw_fini(void *handle)
494{
495 int i, j, r;
496 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
497
498 amdgpu_virt_free_mm_table(adev);
499
500 r = amdgpu_uvd_suspend(adev);
501 if (r)
502 return r;
503
504 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
505 if (adev->uvd.harvest_config & (1 << j))
506 continue;
507 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
508 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
509 }
510 return amdgpu_uvd_sw_fini(adev);
511}
512
513/**
514 * uvd_v7_0_hw_init - start and test UVD block
515 *
516 * @adev: amdgpu_device pointer
517 *
518 * Initialize the hardware, boot up the VCPU and do some testing
519 */
520static int uvd_v7_0_hw_init(void *handle)
521{
522 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
523 struct amdgpu_ring *ring;
524 uint32_t tmp;
525 int i, j, r;
526
527 if (amdgpu_sriov_vf(adev))
528 r = uvd_v7_0_sriov_start(adev);
529 else
530 r = uvd_v7_0_start(adev);
531 if (r)
532 goto done;
533
534 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
535 if (adev->uvd.harvest_config & (1 << j))
536 continue;
537 ring = &adev->uvd.inst[j].ring;
538
539 if (!amdgpu_sriov_vf(adev)) {
540 r = amdgpu_ring_test_helper(ring);
541 if (r)
542 goto done;
543
544 r = amdgpu_ring_alloc(ring, 10);
545 if (r) {
546 DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
547 goto done;
548 }
549
550 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
551 mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
552 amdgpu_ring_write(ring, tmp);
553 amdgpu_ring_write(ring, 0xFFFFF);
554
555 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
556 mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
557 amdgpu_ring_write(ring, tmp);
558 amdgpu_ring_write(ring, 0xFFFFF);
559
560 tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
561 mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
562 amdgpu_ring_write(ring, tmp);
563 amdgpu_ring_write(ring, 0xFFFFF);
564
565 /* Clear timeout status bits */
566 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
567 mmUVD_SEMA_TIMEOUT_STATUS), 0));
568 amdgpu_ring_write(ring, 0x8);
569
570 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
571 mmUVD_SEMA_CNTL), 0));
572 amdgpu_ring_write(ring, 3);
573
574 amdgpu_ring_commit(ring);
575 }
576
577 for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
578 ring = &adev->uvd.inst[j].ring_enc[i];
579 r = amdgpu_ring_test_helper(ring);
580 if (r)
581 goto done;
582 }
583 }
584done:
585 if (!r)
586 DRM_INFO("UVD and UVD ENC initialized successfully.\n");
587
588 return r;
589}
590
591/**
592 * uvd_v7_0_hw_fini - stop the hardware block
593 *
594 * @adev: amdgpu_device pointer
595 *
596 * Stop the UVD block, mark ring as not ready any more
597 */
598static int uvd_v7_0_hw_fini(void *handle)
599{
600 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
601 int i;
602
603 if (!amdgpu_sriov_vf(adev))
604 uvd_v7_0_stop(adev);
605 else {
606 /* full access mode, so don't touch any UVD register */
607 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
608 }
609
610 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
611 if (adev->uvd.harvest_config & (1 << i))
612 continue;
613 adev->uvd.inst[i].ring.sched.ready = false;
614 }
615
616 return 0;
617}
618
619static int uvd_v7_0_suspend(void *handle)
620{
621 int r;
622 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
623
624 r = uvd_v7_0_hw_fini(adev);
625 if (r)
626 return r;
627
628 return amdgpu_uvd_suspend(adev);
629}
630
631static int uvd_v7_0_resume(void *handle)
632{
633 int r;
634 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
635
636 r = amdgpu_uvd_resume(adev);
637 if (r)
638 return r;
639
640 return uvd_v7_0_hw_init(adev);
641}
642
643/**
644 * uvd_v7_0_mc_resume - memory controller programming
645 *
646 * @adev: amdgpu_device pointer
647 *
648 * Let the UVD memory controller know it's offsets
649 */
650static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
651{
652 uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
653 uint32_t offset;
654 int i;
655
656 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
657 if (adev->uvd.harvest_config & (1 << i))
658 continue;
659 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
660 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
661 i == 0 ?
662 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
663 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
664 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
665 i == 0 ?
666 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
667 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
668 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
669 offset = 0;
670 } else {
671 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
672 lower_32_bits(adev->uvd.inst[i].gpu_addr));
673 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
674 upper_32_bits(adev->uvd.inst[i].gpu_addr));
675 offset = size;
676 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
677 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
678 }
679
680 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
681
682 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
683 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
684 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
685 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
686 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
687 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
688
689 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
690 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
691 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
692 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
693 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
694 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
695 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
696
697 WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
698 adev->gfx.config.gb_addr_config);
699 WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
700 adev->gfx.config.gb_addr_config);
701 WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
702 adev->gfx.config.gb_addr_config);
703
704 WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
705 }
706}
707
708static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
709 struct amdgpu_mm_table *table)
710{
711 uint32_t data = 0, loop;
712 uint64_t addr = table->gpu_addr;
713 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
714 uint32_t size;
715 int i;
716
717 size = header->header_size + header->vce_table_size + header->uvd_table_size;
718
719 /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
720 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
721 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
722
723 /* 2, update vmid of descriptor */
724 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
725 data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
726 data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
727 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
728
729 /* 3, notify mmsch about the size of this descriptor */
730 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
731
732 /* 4, set resp to zero */
733 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
734
735 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
736 if (adev->uvd.harvest_config & (1 << i))
737 continue;
738 WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
739 adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
740 adev->uvd.inst[i].ring_enc[0].wptr = 0;
741 adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
742 }
743 /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
744 WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
745
746 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
747 loop = 1000;
748 while ((data & 0x10000002) != 0x10000002) {
749 udelay(10);
750 data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
751 loop--;
752 if (!loop)
753 break;
754 }
755
756 if (!loop) {
757 dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
758 return -EBUSY;
759 }
760
761 return 0;
762}
763
764static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
765{
766 struct amdgpu_ring *ring;
767 uint32_t offset, size, tmp;
768 uint32_t table_size = 0;
769 struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
770 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
771 struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
772 struct mmsch_v1_0_cmd_end end = { {0} };
773 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
774 struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
775 uint8_t i = 0;
776
777 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
778 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
779 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
780 end.cmd_header.command_type = MMSCH_COMMAND__END;
781
782 if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
783 header->version = MMSCH_VERSION;
784 header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
785
786 if (header->vce_table_offset == 0 && header->vce_table_size == 0)
787 header->uvd_table_offset = header->header_size;
788 else
789 header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
790
791 init_table += header->uvd_table_offset;
792
793 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
794 if (adev->uvd.harvest_config & (1 << i))
795 continue;
796 ring = &adev->uvd.inst[i].ring;
797 ring->wptr = 0;
798 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
799
800 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
801 0xFFFFFFFF, 0x00000004);
802 /* mc resume*/
803 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
804 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
805 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
806 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
807 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
808 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
809 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
810 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
811 offset = 0;
812 } else {
813 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
814 lower_32_bits(adev->uvd.inst[i].gpu_addr));
815 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
816 upper_32_bits(adev->uvd.inst[i].gpu_addr));
817 offset = size;
818 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
819 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
820
821 }
822
823 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
824
825 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
826 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
827 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
828 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
829 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
830 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
831
832 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
833 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
834 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
835 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
836 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
837 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
838 AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
839
840 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
841 /* mc resume end*/
842
843 /* disable clock gating */
844 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
845 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
846
847 /* disable interupt */
848 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
849 ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
850
851 /* stall UMC and register bus before resetting VCPU */
852 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
853 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
854 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
855
856 /* put LMI, VCPU, RBC etc... into reset */
857 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
858 (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
859 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
860 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
861 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
862 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
863 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
864 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
865 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
866
867 /* initialize UVD memory controller */
868 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
869 (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
870 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
871 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
872 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
873 UVD_LMI_CTRL__REQ_MODE_MASK |
874 0x00100000L));
875
876 /* take all subblocks out of reset, except VCPU */
877 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
878 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
879
880 /* enable VCPU clock */
881 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
882 UVD_VCPU_CNTL__CLK_EN_MASK);
883
884 /* enable master interrupt */
885 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
886 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
887 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
888
889 /* clear the bit 4 of UVD_STATUS */
890 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
891 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
892
893 /* force RBC into idle state */
894 size = order_base_2(ring->ring_size);
895 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
896 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
897 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
898
899 ring = &adev->uvd.inst[i].ring_enc[0];
900 ring->wptr = 0;
901 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
902 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
903 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
904
905 /* boot up the VCPU */
906 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
907
908 /* enable UMC */
909 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
910 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
911
912 MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
913 }
914 /* add end packet */
915 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
916 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
917 header->uvd_table_size = table_size;
918
919 }
920 return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
921}
922
923/**
924 * uvd_v7_0_start - start UVD block
925 *
926 * @adev: amdgpu_device pointer
927 *
928 * Setup and start the UVD block
929 */
930static int uvd_v7_0_start(struct amdgpu_device *adev)
931{
932 struct amdgpu_ring *ring;
933 uint32_t rb_bufsz, tmp;
934 uint32_t lmi_swap_cntl;
935 uint32_t mp_swap_cntl;
936 int i, j, k, r;
937
938 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
939 if (adev->uvd.harvest_config & (1 << k))
940 continue;
941 /* disable DPG */
942 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
943 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
944 }
945
946 /* disable byte swapping */
947 lmi_swap_cntl = 0;
948 mp_swap_cntl = 0;
949
950 uvd_v7_0_mc_resume(adev);
951
952 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
953 if (adev->uvd.harvest_config & (1 << k))
954 continue;
955 ring = &adev->uvd.inst[k].ring;
956 /* disable clock gating */
957 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
958 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
959
960 /* disable interupt */
961 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
962 ~UVD_MASTINT_EN__VCPU_EN_MASK);
963
964 /* stall UMC and register bus before resetting VCPU */
965 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
966 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
967 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
968 mdelay(1);
969
970 /* put LMI, VCPU, RBC etc... into reset */
971 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
972 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
973 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
974 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
975 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
976 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
977 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
978 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
979 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
980 mdelay(5);
981
982 /* initialize UVD memory controller */
983 WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
984 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
985 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
986 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
987 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
988 UVD_LMI_CTRL__REQ_MODE_MASK |
989 0x00100000L);
990
991#ifdef __BIG_ENDIAN
992 /* swap (8 in 32) RB and IB */
993 lmi_swap_cntl = 0xa;
994 mp_swap_cntl = 0;
995#endif
996 WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
997 WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
998
999 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1000 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1001 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1002 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1003 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1004 WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1005
1006 /* take all subblocks out of reset, except VCPU */
1007 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1008 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1009 mdelay(5);
1010
1011 /* enable VCPU clock */
1012 WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1013 UVD_VCPU_CNTL__CLK_EN_MASK);
1014
1015 /* enable UMC */
1016 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1017 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1018
1019 /* boot up the VCPU */
1020 WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1021 mdelay(10);
1022
1023 for (i = 0; i < 10; ++i) {
1024 uint32_t status;
1025
1026 for (j = 0; j < 100; ++j) {
1027 status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1028 if (status & 2)
1029 break;
1030 mdelay(10);
1031 }
1032 r = 0;
1033 if (status & 2)
1034 break;
1035
1036 DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1037 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1038 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1039 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1040 mdelay(10);
1041 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1042 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1043 mdelay(10);
1044 r = -1;
1045 }
1046
1047 if (r) {
1048 DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1049 return r;
1050 }
1051 /* enable master interrupt */
1052 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1053 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1054 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1055
1056 /* clear the bit 4 of UVD_STATUS */
1057 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1058 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1059
1060 /* force RBC into idle state */
1061 rb_bufsz = order_base_2(ring->ring_size);
1062 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1063 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1064 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1065 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1066 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1067 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1068 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1069
1070 /* set the write pointer delay */
1071 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1072
1073 /* set the wb address */
1074 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1075 (upper_32_bits(ring->gpu_addr) >> 2));
1076
1077 /* programm the RB_BASE for ring buffer */
1078 WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1079 lower_32_bits(ring->gpu_addr));
1080 WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1081 upper_32_bits(ring->gpu_addr));
1082
1083 /* Initialize the ring buffer's read and write pointers */
1084 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1085
1086 ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1087 WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1088 lower_32_bits(ring->wptr));
1089
1090 WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1091 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1092
1093 ring = &adev->uvd.inst[k].ring_enc[0];
1094 WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1095 WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1096 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1097 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1098 WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1099
1100 ring = &adev->uvd.inst[k].ring_enc[1];
1101 WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1102 WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1103 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1104 WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1105 WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1106 }
1107 return 0;
1108}
1109
1110/**
1111 * uvd_v7_0_stop - stop UVD block
1112 *
1113 * @adev: amdgpu_device pointer
1114 *
1115 * stop the UVD block
1116 */
1117static void uvd_v7_0_stop(struct amdgpu_device *adev)
1118{
1119 uint8_t i = 0;
1120
1121 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1122 if (adev->uvd.harvest_config & (1 << i))
1123 continue;
1124 /* force RBC into idle state */
1125 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1126
1127 /* Stall UMC and register bus before resetting VCPU */
1128 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1129 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1130 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1131 mdelay(1);
1132
1133 /* put VCPU into reset */
1134 WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1135 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1136 mdelay(5);
1137
1138 /* disable VCPU clock */
1139 WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1140
1141 /* Unstall UMC and register bus */
1142 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1143 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1144 }
1145}
1146
1147/**
1148 * uvd_v7_0_ring_emit_fence - emit an fence & trap command
1149 *
1150 * @ring: amdgpu_ring pointer
1151 * @fence: fence to emit
1152 *
1153 * Write a fence and a trap command to the ring.
1154 */
1155static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1156 unsigned flags)
1157{
1158 struct amdgpu_device *adev = ring->adev;
1159
1160 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1161
1162 amdgpu_ring_write(ring,
1163 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1164 amdgpu_ring_write(ring, seq);
1165 amdgpu_ring_write(ring,
1166 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1167 amdgpu_ring_write(ring, addr & 0xffffffff);
1168 amdgpu_ring_write(ring,
1169 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1170 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1171 amdgpu_ring_write(ring,
1172 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1173 amdgpu_ring_write(ring, 0);
1174
1175 amdgpu_ring_write(ring,
1176 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1177 amdgpu_ring_write(ring, 0);
1178 amdgpu_ring_write(ring,
1179 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1180 amdgpu_ring_write(ring, 0);
1181 amdgpu_ring_write(ring,
1182 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1183 amdgpu_ring_write(ring, 2);
1184}
1185
1186/**
1187 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1188 *
1189 * @ring: amdgpu_ring pointer
1190 * @fence: fence to emit
1191 *
1192 * Write enc a fence and a trap command to the ring.
1193 */
1194static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1195 u64 seq, unsigned flags)
1196{
1197
1198 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1199
1200 amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1201 amdgpu_ring_write(ring, addr);
1202 amdgpu_ring_write(ring, upper_32_bits(addr));
1203 amdgpu_ring_write(ring, seq);
1204 amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1205}
1206
1207/**
1208 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1209 *
1210 * @ring: amdgpu_ring pointer
1211 */
1212static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1213{
1214 /* The firmware doesn't seem to like touching registers at this point. */
1215}
1216
1217/**
1218 * uvd_v7_0_ring_test_ring - register write test
1219 *
1220 * @ring: amdgpu_ring pointer
1221 *
1222 * Test if we can successfully write to the context register
1223 */
1224static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1225{
1226 struct amdgpu_device *adev = ring->adev;
1227 uint32_t tmp = 0;
1228 unsigned i;
1229 int r;
1230
1231 WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1232 r = amdgpu_ring_alloc(ring, 3);
1233 if (r)
1234 return r;
1235
1236 amdgpu_ring_write(ring,
1237 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1238 amdgpu_ring_write(ring, 0xDEADBEEF);
1239 amdgpu_ring_commit(ring);
1240 for (i = 0; i < adev->usec_timeout; i++) {
1241 tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1242 if (tmp == 0xDEADBEEF)
1243 break;
1244 udelay(1);
1245 }
1246
1247 if (i >= adev->usec_timeout)
1248 r = -ETIMEDOUT;
1249
1250 return r;
1251}
1252
1253/**
1254 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1255 *
1256 * @p: the CS parser with the IBs
1257 * @ib_idx: which IB to patch
1258 *
1259 */
1260static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1261 uint32_t ib_idx)
1262{
1263 struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1264 struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1265 unsigned i;
1266
1267 /* No patching necessary for the first instance */
1268 if (!ring->me)
1269 return 0;
1270
1271 for (i = 0; i < ib->length_dw; i += 2) {
1272 uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1273
1274 reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1275 reg += p->adev->reg_offset[UVD_HWIP][1][1];
1276
1277 amdgpu_set_ib_value(p, ib_idx, i, reg);
1278 }
1279 return 0;
1280}
1281
1282/**
1283 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1284 *
1285 * @ring: amdgpu_ring pointer
1286 * @ib: indirect buffer to execute
1287 *
1288 * Write ring commands to execute the indirect buffer
1289 */
1290static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1291 struct amdgpu_job *job,
1292 struct amdgpu_ib *ib,
1293 uint32_t flags)
1294{
1295 struct amdgpu_device *adev = ring->adev;
1296 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1297
1298 amdgpu_ring_write(ring,
1299 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1300 amdgpu_ring_write(ring, vmid);
1301
1302 amdgpu_ring_write(ring,
1303 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1304 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1305 amdgpu_ring_write(ring,
1306 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1307 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1308 amdgpu_ring_write(ring,
1309 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1310 amdgpu_ring_write(ring, ib->length_dw);
1311}
1312
1313/**
1314 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1315 *
1316 * @ring: amdgpu_ring pointer
1317 * @ib: indirect buffer to execute
1318 *
1319 * Write enc ring commands to execute the indirect buffer
1320 */
1321static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1322 struct amdgpu_job *job,
1323 struct amdgpu_ib *ib,
1324 uint32_t flags)
1325{
1326 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1327
1328 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1329 amdgpu_ring_write(ring, vmid);
1330 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1331 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1332 amdgpu_ring_write(ring, ib->length_dw);
1333}
1334
1335static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1336 uint32_t reg, uint32_t val)
1337{
1338 struct amdgpu_device *adev = ring->adev;
1339
1340 amdgpu_ring_write(ring,
1341 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1342 amdgpu_ring_write(ring, reg << 2);
1343 amdgpu_ring_write(ring,
1344 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1345 amdgpu_ring_write(ring, val);
1346 amdgpu_ring_write(ring,
1347 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1348 amdgpu_ring_write(ring, 8);
1349}
1350
1351static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1352 uint32_t val, uint32_t mask)
1353{
1354 struct amdgpu_device *adev = ring->adev;
1355
1356 amdgpu_ring_write(ring,
1357 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1358 amdgpu_ring_write(ring, reg << 2);
1359 amdgpu_ring_write(ring,
1360 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1361 amdgpu_ring_write(ring, val);
1362 amdgpu_ring_write(ring,
1363 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1364 amdgpu_ring_write(ring, mask);
1365 amdgpu_ring_write(ring,
1366 PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1367 amdgpu_ring_write(ring, 12);
1368}
1369
1370static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1371 unsigned vmid, uint64_t pd_addr)
1372{
1373 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1374 uint32_t data0, data1, mask;
1375
1376 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1377
1378 /* wait for reg writes */
1379 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1380 data1 = lower_32_bits(pd_addr);
1381 mask = 0xffffffff;
1382 uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1383}
1384
1385static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1386{
1387 struct amdgpu_device *adev = ring->adev;
1388 int i;
1389
1390 WARN_ON(ring->wptr % 2 || count % 2);
1391
1392 for (i = 0; i < count / 2; i++) {
1393 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1394 amdgpu_ring_write(ring, 0);
1395 }
1396}
1397
1398static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1399{
1400 amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1401}
1402
1403static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1404 uint32_t reg, uint32_t val,
1405 uint32_t mask)
1406{
1407 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1408 amdgpu_ring_write(ring, reg << 2);
1409 amdgpu_ring_write(ring, mask);
1410 amdgpu_ring_write(ring, val);
1411}
1412
1413static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1414 unsigned int vmid, uint64_t pd_addr)
1415{
1416 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1417
1418 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1419
1420 /* wait for reg writes */
1421 uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1422 lower_32_bits(pd_addr), 0xffffffff);
1423}
1424
1425static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1426 uint32_t reg, uint32_t val)
1427{
1428 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1429 amdgpu_ring_write(ring, reg << 2);
1430 amdgpu_ring_write(ring, val);
1431}
1432
1433#if 0
1434static bool uvd_v7_0_is_idle(void *handle)
1435{
1436 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1437
1438 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1439}
1440
1441static int uvd_v7_0_wait_for_idle(void *handle)
1442{
1443 unsigned i;
1444 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1445
1446 for (i = 0; i < adev->usec_timeout; i++) {
1447 if (uvd_v7_0_is_idle(handle))
1448 return 0;
1449 }
1450 return -ETIMEDOUT;
1451}
1452
1453#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
1454static bool uvd_v7_0_check_soft_reset(void *handle)
1455{
1456 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1457 u32 srbm_soft_reset = 0;
1458 u32 tmp = RREG32(mmSRBM_STATUS);
1459
1460 if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1461 REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1462 (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1463 AMDGPU_UVD_STATUS_BUSY_MASK))
1464 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1465 SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1466
1467 if (srbm_soft_reset) {
1468 adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1469 return true;
1470 } else {
1471 adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1472 return false;
1473 }
1474}
1475
1476static int uvd_v7_0_pre_soft_reset(void *handle)
1477{
1478 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1479
1480 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1481 return 0;
1482
1483 uvd_v7_0_stop(adev);
1484 return 0;
1485}
1486
1487static int uvd_v7_0_soft_reset(void *handle)
1488{
1489 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1490 u32 srbm_soft_reset;
1491
1492 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1493 return 0;
1494 srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1495
1496 if (srbm_soft_reset) {
1497 u32 tmp;
1498
1499 tmp = RREG32(mmSRBM_SOFT_RESET);
1500 tmp |= srbm_soft_reset;
1501 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1502 WREG32(mmSRBM_SOFT_RESET, tmp);
1503 tmp = RREG32(mmSRBM_SOFT_RESET);
1504
1505 udelay(50);
1506
1507 tmp &= ~srbm_soft_reset;
1508 WREG32(mmSRBM_SOFT_RESET, tmp);
1509 tmp = RREG32(mmSRBM_SOFT_RESET);
1510
1511 /* Wait a little for things to settle down */
1512 udelay(50);
1513 }
1514
1515 return 0;
1516}
1517
1518static int uvd_v7_0_post_soft_reset(void *handle)
1519{
1520 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1521
1522 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1523 return 0;
1524
1525 mdelay(5);
1526
1527 return uvd_v7_0_start(adev);
1528}
1529#endif
1530
1531static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1532 struct amdgpu_irq_src *source,
1533 unsigned type,
1534 enum amdgpu_interrupt_state state)
1535{
1536 // TODO
1537 return 0;
1538}
1539
1540static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1541 struct amdgpu_irq_src *source,
1542 struct amdgpu_iv_entry *entry)
1543{
1544 uint32_t ip_instance;
1545
1546 switch (entry->client_id) {
1547 case SOC15_IH_CLIENTID_UVD:
1548 ip_instance = 0;
1549 break;
1550 case SOC15_IH_CLIENTID_UVD1:
1551 ip_instance = 1;
1552 break;
1553 default:
1554 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1555 return 0;
1556 }
1557
1558 DRM_DEBUG("IH: UVD TRAP\n");
1559
1560 switch (entry->src_id) {
1561 case 124:
1562 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1563 break;
1564 case 119:
1565 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1566 break;
1567 case 120:
1568 if (!amdgpu_sriov_vf(adev))
1569 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1570 break;
1571 default:
1572 DRM_ERROR("Unhandled interrupt: %d %d\n",
1573 entry->src_id, entry->src_data[0]);
1574 break;
1575 }
1576
1577 return 0;
1578}
1579
1580#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	/* this disabled path predates multi-instance UVD; program instance 0 */
	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
}

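/**
 * uvd_v7_0_set_hw_clock_gating - enable hardware-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Opens the UVD and SUVD clock gates so the hardware gates clocks itself.
 */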
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	/* this disabled path predates multi-instance UVD; program instance 0 */
	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
}

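/**
 * uvd_v7_0_set_bypass_mode - toggle DCLK/VCLK bypass
 *
 * @adev: amdgpu_device pointer
 * @enable: true to bypass the DFS for DCLK and VCLK
 */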
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

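/**
 * uvd_v7_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to program
 */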
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

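/**
 * uvd_v7_0_set_powergating_state - set UVD power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to program
 */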
static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* this disabled path predates multi-instance UVD; program instance 0 */
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	}

	return uvd_v7_0_start(adev);
}
#endif

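/**
 * uvd_v7_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state (unused)
 *
 * Stub kept so the IP framework has a valid callback during driver unload.
 */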
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

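/**
 * uvd_v7_0_set_ring_funcs - set decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the decode ring callbacks for each non-harvested UVD instance.
 */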
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

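/**
 * uvd_v7_0_set_enc_ring_funcs - set encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the encode ring callbacks for each non-harvested UVD instance.
 */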
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

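/**
 * uvd_v7_0_set_irq_funcs - set interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Registers one interrupt type per ring (encode rings plus decode)
 * for each non-harvested UVD instance.
 */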
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};