1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_vcn.h"
28#include "soc15.h"
29#include "soc15d.h"
30#include "soc15_common.h"
31
32#include "vcn/vcn_1_0_offset.h"
33#include "vcn/vcn_1_0_sh_mask.h"
34#include "hdp/hdp_4_0_offset.h"
35#include "mmhub/mmhub_9_1_offset.h"
36#include "mmhub/mmhub_9_1_sh_mask.h"
37
38static int vcn_v1_0_start(struct amdgpu_device *adev);
39static int vcn_v1_0_stop(struct amdgpu_device *adev);
40static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
41static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
42static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
43
44/**
45 * vcn_v1_0_early_init - set function pointers
46 *
47 * @handle: amdgpu_device pointer
48 *
49 * Set ring and irq function pointers
50 */
51static int vcn_v1_0_early_init(void *handle)
52{
53 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
54
55 adev->vcn.num_enc_rings = 2;
56
57 vcn_v1_0_set_dec_ring_funcs(adev);
58 vcn_v1_0_set_enc_ring_funcs(adev);
59 vcn_v1_0_set_irq_funcs(adev);
60
61 return 0;
62}
63
64/**
65 * vcn_v1_0_sw_init - sw init for VCN block
66 *
67 * @handle: amdgpu_device pointer
68 *
 * Load firmware and perform software initialization
70 */
71static int vcn_v1_0_sw_init(void *handle)
72{
73 struct amdgpu_ring *ring;
74 int i, r;
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 /* VCN DEC TRAP */
78 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
79 if (r)
80 return r;
81
82 /* VCN ENC TRAP */
83 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
84 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
85 &adev->vcn.irq);
86 if (r)
87 return r;
88 }
89
90 r = amdgpu_vcn_sw_init(adev);
91 if (r)
92 return r;
93
94 r = amdgpu_vcn_resume(adev);
95 if (r)
96 return r;
97
98 ring = &adev->vcn.ring_dec;
99 sprintf(ring->name, "vcn_dec");
100 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
101 if (r)
102 return r;
103
104 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
105 ring = &adev->vcn.ring_enc[i];
106 sprintf(ring->name, "vcn_enc%d", i);
107 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
108 if (r)
109 return r;
110 }
111
112 return r;
113}
114
115/**
116 * vcn_v1_0_sw_fini - sw fini for VCN block
117 *
118 * @handle: amdgpu_device pointer
119 *
120 * VCN suspend and free up sw allocation
121 */
122static int vcn_v1_0_sw_fini(void *handle)
123{
124 int r;
125 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
126
127 r = amdgpu_vcn_suspend(adev);
128 if (r)
129 return r;
130
131 r = amdgpu_vcn_sw_fini(adev);
132
133 return r;
134}
135
136/**
137 * vcn_v1_0_hw_init - start and test VCN block
138 *
139 * @handle: amdgpu_device pointer
140 *
141 * Initialize the hardware, boot up the VCPU and do some testing
142 */
143static int vcn_v1_0_hw_init(void *handle)
144{
145 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
146 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
147 int i, r;
148
149 r = vcn_v1_0_start(adev);
150 if (r)
151 goto done;
152
153 ring->ready = true;
154 r = amdgpu_ring_test_ring(ring);
155 if (r) {
156 ring->ready = false;
157 goto done;
158 }
159
160 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
161 ring = &adev->vcn.ring_enc[i];
162 ring->ready = true;
163 r = amdgpu_ring_test_ring(ring);
164 if (r) {
165 ring->ready = false;
166 goto done;
167 }
168 }
169
170done:
171 if (!r)
172 DRM_INFO("VCN decode and encode initialized successfully.\n");
173
174 return r;
175}
176
177/**
178 * vcn_v1_0_hw_fini - stop the hardware block
179 *
180 * @handle: amdgpu_device pointer
181 *
182 * Stop the VCN block, mark ring as not ready any more
183 */
184static int vcn_v1_0_hw_fini(void *handle)
185{
186 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
187 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
188 int r;
189
190 r = vcn_v1_0_stop(adev);
191 if (r)
192 return r;
193
194 ring->ready = false;
195
196 return 0;
197}
198
199/**
200 * vcn_v1_0_suspend - suspend VCN block
201 *
202 * @handle: amdgpu_device pointer
203 *
204 * HW fini and suspend VCN block
205 */
206static int vcn_v1_0_suspend(void *handle)
207{
208 int r;
209 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
210
211 r = vcn_v1_0_hw_fini(adev);
212 if (r)
213 return r;
214
215 r = amdgpu_vcn_suspend(adev);
216
217 return r;
218}
219
220/**
221 * vcn_v1_0_resume - resume VCN block
222 *
223 * @handle: amdgpu_device pointer
224 *
225 * Resume firmware and hw init VCN block
226 */
227static int vcn_v1_0_resume(void *handle)
228{
229 int r;
230 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
231
232 r = amdgpu_vcn_resume(adev);
233 if (r)
234 return r;
235
236 r = vcn_v1_0_hw_init(adev);
237
238 return r;
239}
240
241/**
242 * vcn_v1_0_mc_resume - memory controller programming
243 *
244 * @adev: amdgpu_device pointer
245 *
 * Let the VCN memory controller know its offsets
247 */
248static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
249{
250 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
251
252 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
253 lower_32_bits(adev->vcn.gpu_addr));
254 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
255 upper_32_bits(adev->vcn.gpu_addr));
256 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
257 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
258 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
259
260 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
261 lower_32_bits(adev->vcn.gpu_addr + size));
262 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
263 upper_32_bits(adev->vcn.gpu_addr + size));
264 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
265 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
266
267 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
268 lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
269 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
270 upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
271 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
272 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
273 AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
274
275 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
276 adev->gfx.config.gb_addr_config);
277 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
278 adev->gfx.config.gb_addr_config);
279 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
280 adev->gfx.config.gb_addr_config);
281}
282
283/**
284 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
285 *
286 * @adev: amdgpu_device pointer
287 * @sw: enable SW clock gating
288 *
289 * Disable clock gating for VCN block
290 */
291static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
292{
293 uint32_t data;
294
295 /* JPEG disable CGC */
296 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
297
298 if (sw)
299 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
300 else
301 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
302
303 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
304 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
305 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
306
307 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
308 data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
309 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
310
311 /* UVD disable CGC */
312 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
313 if (sw)
314 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
315 else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
317
318 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
319 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
320 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
321
322 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
323 data &= ~(UVD_CGC_GATE__SYS_MASK
324 | UVD_CGC_GATE__UDEC_MASK
325 | UVD_CGC_GATE__MPEG2_MASK
326 | UVD_CGC_GATE__REGS_MASK
327 | UVD_CGC_GATE__RBC_MASK
328 | UVD_CGC_GATE__LMI_MC_MASK
329 | UVD_CGC_GATE__LMI_UMC_MASK
330 | UVD_CGC_GATE__IDCT_MASK
331 | UVD_CGC_GATE__MPRD_MASK
332 | UVD_CGC_GATE__MPC_MASK
333 | UVD_CGC_GATE__LBSI_MASK
334 | UVD_CGC_GATE__LRBBM_MASK
335 | UVD_CGC_GATE__UDEC_RE_MASK
336 | UVD_CGC_GATE__UDEC_CM_MASK
337 | UVD_CGC_GATE__UDEC_IT_MASK
338 | UVD_CGC_GATE__UDEC_DB_MASK
339 | UVD_CGC_GATE__UDEC_MP_MASK
340 | UVD_CGC_GATE__WCB_MASK
341 | UVD_CGC_GATE__VCPU_MASK
342 | UVD_CGC_GATE__SCPU_MASK);
343 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
344
345 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
346 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
347 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
348 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
349 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
350 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
351 | UVD_CGC_CTRL__SYS_MODE_MASK
352 | UVD_CGC_CTRL__UDEC_MODE_MASK
353 | UVD_CGC_CTRL__MPEG2_MODE_MASK
354 | UVD_CGC_CTRL__REGS_MODE_MASK
355 | UVD_CGC_CTRL__RBC_MODE_MASK
356 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
357 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
358 | UVD_CGC_CTRL__IDCT_MODE_MASK
359 | UVD_CGC_CTRL__MPRD_MODE_MASK
360 | UVD_CGC_CTRL__MPC_MODE_MASK
361 | UVD_CGC_CTRL__LBSI_MODE_MASK
362 | UVD_CGC_CTRL__LRBBM_MODE_MASK
363 | UVD_CGC_CTRL__WCB_MODE_MASK
364 | UVD_CGC_CTRL__VCPU_MODE_MASK
365 | UVD_CGC_CTRL__SCPU_MODE_MASK);
366 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
367
368 /* turn on */
369 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
370 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
371 | UVD_SUVD_CGC_GATE__SIT_MASK
372 | UVD_SUVD_CGC_GATE__SMP_MASK
373 | UVD_SUVD_CGC_GATE__SCM_MASK
374 | UVD_SUVD_CGC_GATE__SDB_MASK
375 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
376 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
377 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
378 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
379 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
380 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
381 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
382 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
383 | UVD_SUVD_CGC_GATE__SCLR_MASK
384 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
385 | UVD_SUVD_CGC_GATE__ENT_MASK
386 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
387 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
388 | UVD_SUVD_CGC_GATE__SITE_MASK
389 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
390 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
391 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
392 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
393 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
394 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
395
396 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
397 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
398 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
399 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
400 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
401 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
402 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
403 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
404 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
405 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
406 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
407 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
408}
409
410/**
411 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
412 *
413 * @adev: amdgpu_device pointer
414 * @sw: enable SW clock gating
415 *
416 * Enable clock gating for VCN block
417 */
418static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
419{
420 uint32_t data = 0;
421
422 /* enable JPEG CGC */
423 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
424 if (sw)
425 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
426 else
427 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
428 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
429 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
430 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
431
432 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
433 data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
434 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
435
436 /* enable UVD CGC */
437 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
438 if (sw)
439 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
440 else
441 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
442 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
443 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
444 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
445
446 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
447 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
448 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
449 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
450 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
451 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
452 | UVD_CGC_CTRL__SYS_MODE_MASK
453 | UVD_CGC_CTRL__UDEC_MODE_MASK
454 | UVD_CGC_CTRL__MPEG2_MODE_MASK
455 | UVD_CGC_CTRL__REGS_MODE_MASK
456 | UVD_CGC_CTRL__RBC_MODE_MASK
457 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
458 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
459 | UVD_CGC_CTRL__IDCT_MODE_MASK
460 | UVD_CGC_CTRL__MPRD_MODE_MASK
461 | UVD_CGC_CTRL__MPC_MODE_MASK
462 | UVD_CGC_CTRL__LBSI_MODE_MASK
463 | UVD_CGC_CTRL__LRBBM_MODE_MASK
464 | UVD_CGC_CTRL__WCB_MODE_MASK
465 | UVD_CGC_CTRL__VCPU_MODE_MASK
466 | UVD_CGC_CTRL__SCPU_MODE_MASK);
467 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
468
469 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
470 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
471 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
472 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
473 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
474 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
475 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
476 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
477 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
478 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
479 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
480 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
481}
482
483/**
484 * vcn_v1_0_start - start VCN block
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Setup and start the VCN block
489 */
490static int vcn_v1_0_start(struct amdgpu_device *adev)
491{
492 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
493 uint32_t rb_bufsz, tmp;
494 uint32_t lmi_swap_cntl;
495 int i, j, r;
496
497 /* disable byte swapping */
498 lmi_swap_cntl = 0;
499
500 vcn_v1_0_mc_resume(adev);
501
502 /* disable clock gating */
503 vcn_v1_0_disable_clock_gating(adev, true);
504
	/* disable interrupt */
506 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
507 ~UVD_MASTINT_EN__VCPU_EN_MASK);
508
509 /* stall UMC and register bus before resetting VCPU */
510 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
511 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
512 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
513 mdelay(1);
514
515 /* put LMI, VCPU, RBC etc... into reset */
516 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
517 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
518 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
519 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
520 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
521 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
522 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
523 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
524 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
525 mdelay(5);
526
527 /* initialize VCN memory controller */
528 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
529 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
530 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
531 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
532 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
533 UVD_LMI_CTRL__REQ_MODE_MASK |
534 0x00100000L);
535
536#ifdef __BIG_ENDIAN
537 /* swap (8 in 32) RB and IB */
538 lmi_swap_cntl = 0xa;
539#endif
540 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
541
542 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
543 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
544 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
545 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
546 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
547 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
548
549 /* take all subblocks out of reset, except VCPU */
550 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
551 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
552 mdelay(5);
553
554 /* enable VCPU clock */
555 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
556 UVD_VCPU_CNTL__CLK_EN_MASK);
557
558 /* enable UMC */
559 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
560 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
561
562 /* boot up the VCPU */
563 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
564 mdelay(10);
565
566 for (i = 0; i < 10; ++i) {
567 uint32_t status;
568
569 for (j = 0; j < 100; ++j) {
570 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
571 if (status & 2)
572 break;
573 mdelay(10);
574 }
575 r = 0;
576 if (status & 2)
577 break;
578
579 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
580 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
581 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
582 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
583 mdelay(10);
584 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
585 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
586 mdelay(10);
587 r = -1;
588 }
589
590 if (r) {
591 DRM_ERROR("VCN decode not responding, giving up!!!\n");
592 return r;
593 }
594 /* enable master interrupt */
595 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
596 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
597 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
598
	/* clear bit 4 of VCN_STATUS */
600 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
601 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
602
603 /* force RBC into idle state */
604 rb_bufsz = order_base_2(ring->ring_size);
605 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
606 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
607 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
608 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
609 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
610 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
611 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
612
613 /* set the write pointer delay */
614 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
615
616 /* set the wb address */
617 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
618 (upper_32_bits(ring->gpu_addr) >> 2));
619
	/* program the RB_BASE for ring buffer */
621 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
622 lower_32_bits(ring->gpu_addr));
623 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
624 upper_32_bits(ring->gpu_addr));
625
626 /* Initialize the ring buffer's read and write pointers */
627 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
628
629 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
630 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
631 lower_32_bits(ring->wptr));
632
633 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
634 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
635
636 ring = &adev->vcn.ring_enc[0];
637 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
638 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
639 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
640 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
641 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
642
643 ring = &adev->vcn.ring_enc[1];
644 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
645 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
646 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
647 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
648 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
649
650 return 0;
651}
652
653/**
654 * vcn_v1_0_stop - stop VCN block
655 *
656 * @adev: amdgpu_device pointer
657 *
658 * stop the VCN block
659 */
660static int vcn_v1_0_stop(struct amdgpu_device *adev)
661{
662 /* force RBC into idle state */
663 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
664
665 /* Stall UMC and register bus before resetting VCPU */
666 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
667 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
668 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
669 mdelay(1);
670
671 /* put VCPU into reset */
672 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
673 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
674 mdelay(5);
675
676 /* disable VCPU clock */
677 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
678
679 /* Unstall UMC and register bus */
680 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
681 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
682
683 /* enable clock gating */
684 vcn_v1_0_enable_clock_gating(adev, true);
685
686 return 0;
687}
688
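/**
 * vcn_v1_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Currently a stub: clock gating is programmed in vcn_v1_0_start() and
 * vcn_v1_0_stop(), but the callback itself is kept because the IP
 * framework calls it during driver unload.
 */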
689static int vcn_v1_0_set_clockgating_state(void *handle,
690 enum amd_clockgating_state state)
691{
	/* needed for driver unload */
693 return 0;
694}
695
696/**
697 * vcn_v1_0_dec_ring_get_rptr - get read pointer
698 *
699 * @ring: amdgpu_ring pointer
700 *
701 * Returns the current hardware read pointer
702 */
703static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
704{
705 struct amdgpu_device *adev = ring->adev;
706
707 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
708}
709
710/**
711 * vcn_v1_0_dec_ring_get_wptr - get write pointer
712 *
713 * @ring: amdgpu_ring pointer
714 *
715 * Returns the current hardware write pointer
716 */
717static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
718{
719 struct amdgpu_device *adev = ring->adev;
720
721 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
722}
723
724/**
725 * vcn_v1_0_dec_ring_set_wptr - set write pointer
726 *
727 * @ring: amdgpu_ring pointer
728 *
729 * Commits the write pointer to the hardware
730 */
731static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
732{
733 struct amdgpu_device *adev = ring->adev;
734
735 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
736}
737
738/**
739 * vcn_v1_0_dec_ring_insert_start - insert a start command
740 *
741 * @ring: amdgpu_ring pointer
742 *
743 * Write a start command to the ring.
744 */
745static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
746{
747 struct amdgpu_device *adev = ring->adev;
748
749 amdgpu_ring_write(ring,
750 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
751 amdgpu_ring_write(ring, 0);
752 amdgpu_ring_write(ring,
753 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
754 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
755}
756
757/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
759 *
760 * @ring: amdgpu_ring pointer
761 *
 * Write an end command to the ring.
763 */
764static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
765{
766 struct amdgpu_device *adev = ring->adev;
767
768 amdgpu_ring_write(ring,
769 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
770 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
771}
772
773/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
775 *
776 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
778 *
779 * Write a fence and a trap command to the ring.
780 */
781static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
782 unsigned flags)
783{
784 struct amdgpu_device *adev = ring->adev;
785
786 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
787
788 amdgpu_ring_write(ring,
789 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
790 amdgpu_ring_write(ring, seq);
791 amdgpu_ring_write(ring,
792 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
793 amdgpu_ring_write(ring, addr & 0xffffffff);
794 amdgpu_ring_write(ring,
795 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
796 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
797 amdgpu_ring_write(ring,
798 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
799 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
800
801 amdgpu_ring_write(ring,
802 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
803 amdgpu_ring_write(ring, 0);
804 amdgpu_ring_write(ring,
805 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
806 amdgpu_ring_write(ring, 0);
807 amdgpu_ring_write(ring,
808 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
809 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
810}
811
812/**
813 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
814 *
815 * @ring: amdgpu_ring pointer
816 * @ib: indirect buffer to execute
817 *
818 * Write ring commands to execute the indirect buffer
819 */
820static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
821 struct amdgpu_ib *ib,
822 unsigned vmid, bool ctx_switch)
823{
824 struct amdgpu_device *adev = ring->adev;
825
826 amdgpu_ring_write(ring,
827 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
828 amdgpu_ring_write(ring, vmid);
829
830 amdgpu_ring_write(ring,
831 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
832 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
833 amdgpu_ring_write(ring,
834 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
835 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
836 amdgpu_ring_write(ring,
837 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
838 amdgpu_ring_write(ring, ib->length_dw);
839}
840
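/**
 * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask applied to the register value before the comparison
 *
 * Write the register offset, expected value and mask to the VCPU
 * data/scratch registers, then issue a REG_READ_COND_WAIT command so
 * the decoder waits until (value & mask) == val.
 */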
841static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
842 uint32_t reg, uint32_t val,
843 uint32_t mask)
844{
845 struct amdgpu_device *adev = ring->adev;
846
847 amdgpu_ring_write(ring,
848 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
849 amdgpu_ring_write(ring, reg << 2);
850 amdgpu_ring_write(ring,
851 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
852 amdgpu_ring_write(ring, val);
853 amdgpu_ring_write(ring,
854 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
855 amdgpu_ring_write(ring, mask);
856 amdgpu_ring_write(ring,
857 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
858 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
859}
860
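/**
 * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM TLB flush on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: virtual memory ID to flush
 * @pd_addr: page directory base address
 *
 * Flush the GPU TLB through the GMC helper, then wait for the VM hub's
 * page-table base register to reflect the new address.
 */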
861static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
862 unsigned vmid, uint64_t pd_addr)
863{
864 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
865 uint32_t data0, data1, mask;
866
867 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
868
869 /* wait for register write */
870 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
871 data1 = lower_32_bits(pd_addr);
872 mask = 0xffffffff;
873 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
874}
875
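/**
 * vcn_v1_0_dec_ring_emit_wreg - emit a register write on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a WRITE_REG command so the VCPU writes @val to @reg.
 */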
876static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
877 uint32_t reg, uint32_t val)
878{
879 struct amdgpu_device *adev = ring->adev;
880
881 amdgpu_ring_write(ring,
882 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
883 amdgpu_ring_write(ring, reg << 2);
884 amdgpu_ring_write(ring,
885 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
886 amdgpu_ring_write(ring, val);
887 amdgpu_ring_write(ring,
888 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
889 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
890}
891
892/**
893 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
894 *
895 * @ring: amdgpu_ring pointer
896 *
897 * Returns the current hardware enc read pointer
898 */
899static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
900{
901 struct amdgpu_device *adev = ring->adev;
902
903 if (ring == &adev->vcn.ring_enc[0])
904 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
905 else
906 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
907}
908
/**
910 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
911 *
912 * @ring: amdgpu_ring pointer
913 *
914 * Returns the current hardware enc write pointer
915 */
916static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
917{
918 struct amdgpu_device *adev = ring->adev;
919
920 if (ring == &adev->vcn.ring_enc[0])
921 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
922 else
923 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
924}
925
/**
927 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
928 *
929 * @ring: amdgpu_ring pointer
930 *
931 * Commits the enc write pointer to the hardware
932 */
933static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
934{
935 struct amdgpu_device *adev = ring->adev;
936
937 if (ring == &adev->vcn.ring_enc[0])
938 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
939 lower_32_bits(ring->wptr));
940 else
941 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
942 lower_32_bits(ring->wptr));
943}
944
945/**
946 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
947 *
948 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
950 *
 * Write an enc fence and a trap command to the ring.
952 */
953static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
954 u64 seq, unsigned flags)
955{
956 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
957
958 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
959 amdgpu_ring_write(ring, addr);
960 amdgpu_ring_write(ring, upper_32_bits(addr));
961 amdgpu_ring_write(ring, seq);
962 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
963}
964
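/**
 * vcn_v1_0_enc_ring_insert_end - insert an end command on the encode ring
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an END command to the encode ring.
 */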
965static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
966{
967 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
968}
969
970/**
971 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
972 *
973 * @ring: amdgpu_ring pointer
974 * @ib: indirect buffer to execute
975 *
976 * Write enc ring commands to execute the indirect buffer
977 */
978static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
979 struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
980{
981 amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
982 amdgpu_ring_write(ring, vmid);
983 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
984 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
985 amdgpu_ring_write(ring, ib->length_dw);
986}
987
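/**
 * vcn_v1_0_enc_ring_emit_reg_wait - emit an encode register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask applied to the register value before the comparison
 *
 * Emit a REG_WAIT command so the encoder waits until
 * (value & mask) == val.
 */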
988static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
989 uint32_t reg, uint32_t val,
990 uint32_t mask)
991{
992 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
993 amdgpu_ring_write(ring, reg << 2);
994 amdgpu_ring_write(ring, mask);
995 amdgpu_ring_write(ring, val);
996}
997
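/**
 * vcn_v1_0_enc_ring_emit_vm_flush - emit a VM TLB flush on the encode ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: virtual memory ID to flush
 * @pd_addr: page directory base address
 *
 * Flush the GPU TLB through the GMC helper and wait for the VM hub's
 * page-table base register to take the new value.
 */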
998static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
999 unsigned int vmid, uint64_t pd_addr)
1000{
1001 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1002
1003 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1004
1005 /* wait for reg writes */
1006 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1007 lower_32_bits(pd_addr), 0xffffffff);
1008}
1009
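/**
 * vcn_v1_0_enc_ring_emit_wreg - emit a register write on the encode ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a REG_WRITE command so the encoder writes @val to @reg.
 */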
1010static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1011 uint32_t reg, uint32_t val)
1012{
1013 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1014 amdgpu_ring_write(ring, reg << 2);
1015 amdgpu_ring_write(ring, val);
1016}
1017
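/**
 * vcn_v1_0_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Stub implementation; no per-source programming is done here, the
 * callback only satisfies the amdgpu_irq_src_funcs interface.
 */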
1018static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
1019 struct amdgpu_irq_src *source,
1020 unsigned type,
1021 enum amdgpu_interrupt_state state)
1022{
1023 return 0;
1024}
1025
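/**
 * vcn_v1_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch the trap to the matching ring: source id 124 is the decode
 * ring, 119 and 120 are the two encode rings.
 */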
1026static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
1027 struct amdgpu_irq_src *source,
1028 struct amdgpu_iv_entry *entry)
1029{
1030 DRM_DEBUG("IH: VCN TRAP\n");
1031
1032 switch (entry->src_id) {
1033 case 124:
1034 amdgpu_fence_process(&adev->vcn.ring_dec);
1035 break;
1036 case 119:
1037 amdgpu_fence_process(&adev->vcn.ring_enc[0]);
1038 break;
1039 case 120:
1040 amdgpu_fence_process(&adev->vcn.ring_enc[1]);
1041 break;
1042 default:
1043 DRM_ERROR("Unhandled interrupt: %d %d\n",
1044 entry->src_id, entry->src_data[0]);
1045 break;
1046 }
1047
1048 return 0;
1049}
1050
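/**
 * vcn_v1_0_ring_insert_nop - insert NOP packets on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of NOP packets to insert
 *
 * Pad the ring with writes to the UVD_NO_OP register, one packet per
 * requested NOP.
 */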
1051static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1052{
1053 int i;
1054 struct amdgpu_device *adev = ring->adev;
1055
1056 for (i = 0; i < count; i++)
1057 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1058
1059}
1060
1061
1062static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
1063 .name = "vcn_v1_0",
1064 .early_init = vcn_v1_0_early_init,
1065 .late_init = NULL,
1066 .sw_init = vcn_v1_0_sw_init,
1067 .sw_fini = vcn_v1_0_sw_fini,
1068 .hw_init = vcn_v1_0_hw_init,
1069 .hw_fini = vcn_v1_0_hw_fini,
1070 .suspend = vcn_v1_0_suspend,
1071 .resume = vcn_v1_0_resume,
1072 .is_idle = NULL /* vcn_v1_0_is_idle */,
1073 .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
1074 .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
1075 .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
1076 .soft_reset = NULL /* vcn_v1_0_soft_reset */,
1077 .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
1078 .set_clockgating_state = vcn_v1_0_set_clockgating_state,
1079 .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
1080};
1081
1082static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1083 .type = AMDGPU_RING_TYPE_VCN_DEC,
1084 .align_mask = 0xf,
1085 .nop = PACKET0(0x81ff, 0),
1086 .support_64bit_ptrs = false,
1087 .vmhub = AMDGPU_MMHUB,
1088 .get_rptr = vcn_v1_0_dec_ring_get_rptr,
1089 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
1090 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
1091 .emit_frame_size =
1092 6 + 6 + /* hdp invalidate / flush */
1093 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1094 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1095 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
1096 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
1097 6,
1098 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
1099 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
1100 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
1101 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
1102 .test_ring = amdgpu_vcn_dec_ring_test_ring,
1103 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1104 .insert_nop = vcn_v1_0_ring_insert_nop,
1105 .insert_start = vcn_v1_0_dec_ring_insert_start,
1106 .insert_end = vcn_v1_0_dec_ring_insert_end,
1107 .pad_ib = amdgpu_ring_generic_pad_ib,
1108 .begin_use = amdgpu_vcn_ring_begin_use,
1109 .end_use = amdgpu_vcn_ring_end_use,
1110 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
1111 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
1112};
1113
1114static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1115 .type = AMDGPU_RING_TYPE_VCN_ENC,
1116 .align_mask = 0x3f,
1117 .nop = VCN_ENC_CMD_NO_OP,
1118 .support_64bit_ptrs = false,
1119 .vmhub = AMDGPU_MMHUB,
1120 .get_rptr = vcn_v1_0_enc_ring_get_rptr,
1121 .get_wptr = vcn_v1_0_enc_ring_get_wptr,
1122 .set_wptr = vcn_v1_0_enc_ring_set_wptr,
1123 .emit_frame_size =
1124 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1125 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1126 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
1127 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
1128 1, /* vcn_v1_0_enc_ring_insert_end */
1129 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
1130 .emit_ib = vcn_v1_0_enc_ring_emit_ib,
1131 .emit_fence = vcn_v1_0_enc_ring_emit_fence,
1132 .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
1133 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1134 .test_ib = amdgpu_vcn_enc_ring_test_ib,
1135 .insert_nop = amdgpu_ring_insert_nop,
1136 .insert_end = vcn_v1_0_enc_ring_insert_end,
1137 .pad_ib = amdgpu_ring_generic_pad_ib,
1138 .begin_use = amdgpu_vcn_ring_begin_use,
1139 .end_use = amdgpu_vcn_ring_end_use,
1140 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
1141 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
1142};
1143
1144static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
1145{
1146 adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
1147 DRM_INFO("VCN decode is enabled in VM mode\n");
1148}
1149
1150static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1151{
1152 int i;
1153
1154 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1155 adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
1156
1157 DRM_INFO("VCN encode is enabled in VM mode\n");
1158}
1159
1160static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
1161 .set = vcn_v1_0_set_interrupt_state,
1162 .process = vcn_v1_0_process_interrupt,
1163};
1164
1165static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
1166{
1167 adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
1168 adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
1169}
1170
1171const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
1172{
1173 .type = AMD_IP_BLOCK_TYPE_VCN,
1174 .major = 1,
1175 .minor = 0,
1176 .rev = 0,
1177 .funcs = &vcn_v1_0_ip_funcs,
1178};
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25
26#include "amdgpu.h"
27#include "amdgpu_vcn.h"
28#include "amdgpu_pm.h"
29#include "soc15.h"
30#include "soc15d.h"
31#include "soc15_common.h"
32
33#include "vcn/vcn_1_0_offset.h"
34#include "vcn/vcn_1_0_sh_mask.h"
35#include "mmhub/mmhub_9_1_offset.h"
36#include "mmhub/mmhub_9_1_sh_mask.h"
37
38#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
39#include "jpeg_v1_0.h"
40#include "vcn_v1_0.h"
41
42#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab
43#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
44#define mmUVD_REG_XX_MASK_1_0 0x05ac
45#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
46
47static int vcn_v1_0_stop(struct amdgpu_device *adev);
48static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
49static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
50static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
51static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
52static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
53 int inst_idx, struct dpg_pause_state *new_state);
54
55static void vcn_v1_0_idle_work_handler(struct work_struct *work);
56static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
57
58/**
59 * vcn_v1_0_early_init - set function pointers
60 *
61 * @handle: amdgpu_device pointer
62 *
63 * Set ring and irq function pointers
64 */
65static int vcn_v1_0_early_init(void *handle)
66{
67 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
68
69 adev->vcn.num_vcn_inst = 1;
70 adev->vcn.num_enc_rings = 2;
71
72 vcn_v1_0_set_dec_ring_funcs(adev);
73 vcn_v1_0_set_enc_ring_funcs(adev);
74 vcn_v1_0_set_irq_funcs(adev);
75
76 jpeg_v1_0_early_init(handle);
77
78 return 0;
79}
80
81/**
82 * vcn_v1_0_sw_init - sw init for VCN block
83 *
84 * @handle: amdgpu_device pointer
85 *
 * Load firmware and perform software initialization
87 */
88static int vcn_v1_0_sw_init(void *handle)
89{
90 struct amdgpu_ring *ring;
91 int i, r;
92 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
93
94 /* VCN DEC TRAP */
95 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
96 VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
97 if (r)
98 return r;
99
100 /* VCN ENC TRAP */
101 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
102 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
103 &adev->vcn.inst->irq);
104 if (r)
105 return r;
106 }
107
108 r = amdgpu_vcn_sw_init(adev);
109 if (r)
110 return r;
111
112 /* Override the work func */
113 adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
114
115 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
116 const struct common_firmware_header *hdr;
117 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
118 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
119 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
120 adev->firmware.fw_size +=
121 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
122 dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
123 }
124
125 r = amdgpu_vcn_resume(adev);
126 if (r)
127 return r;
128
129 ring = &adev->vcn.inst->ring_dec;
130 sprintf(ring->name, "vcn_dec");
131 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
132 AMDGPU_RING_PRIO_DEFAULT, NULL);
133 if (r)
134 return r;
135
136 adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
137 SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
138 adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
139 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
140 adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
141 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
142 adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
143 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
144 adev->vcn.internal.nop = adev->vcn.inst->external.nop =
145 SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
146
147 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
148 ring = &adev->vcn.inst->ring_enc[i];
149 sprintf(ring->name, "vcn_enc%d", i);
150 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
151 AMDGPU_RING_PRIO_DEFAULT, NULL);
152 if (r)
153 return r;
154 }
155
156 adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
157
158 r = jpeg_v1_0_sw_init(handle);
159
160 return r;
161}
162
163/**
164 * vcn_v1_0_sw_fini - sw fini for VCN block
165 *
166 * @handle: amdgpu_device pointer
167 *
168 * VCN suspend and free up sw allocation
169 */
170static int vcn_v1_0_sw_fini(void *handle)
171{
172 int r;
173 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
174
175 r = amdgpu_vcn_suspend(adev);
176 if (r)
177 return r;
178
179 jpeg_v1_0_sw_fini(handle);
180
181 r = amdgpu_vcn_sw_fini(adev);
182
183 return r;
184}
185
186/**
187 * vcn_v1_0_hw_init - start and test VCN block
188 *
189 * @handle: amdgpu_device pointer
190 *
191 * Initialize the hardware, boot up the VCPU and do some testing
192 */
193static int vcn_v1_0_hw_init(void *handle)
194{
195 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
196 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
197 int i, r;
198
199 r = amdgpu_ring_test_helper(ring);
200 if (r)
201 goto done;
202
203 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
204 ring = &adev->vcn.inst->ring_enc[i];
205 r = amdgpu_ring_test_helper(ring);
206 if (r)
207 goto done;
208 }
209
210 ring = &adev->jpeg.inst->ring_dec;
211 r = amdgpu_ring_test_helper(ring);
212 if (r)
213 goto done;
214
215done:
216 if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
219
220 return r;
221}
222
223/**
224 * vcn_v1_0_hw_fini - stop the hardware block
225 *
226 * @handle: amdgpu_device pointer
227 *
228 * Stop the VCN block, mark ring as not ready any more
229 */
230static int vcn_v1_0_hw_fini(void *handle)
231{
232 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
233
234 cancel_delayed_work_sync(&adev->vcn.idle_work);
235
236 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
237 (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
238 RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
239 vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
240 }
241
242 return 0;
243}
244
245/**
246 * vcn_v1_0_suspend - suspend VCN block
247 *
248 * @handle: amdgpu_device pointer
249 *
250 * HW fini and suspend VCN block
251 */
252static int vcn_v1_0_suspend(void *handle)
253{
254 int r;
255 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
256
257 r = vcn_v1_0_hw_fini(adev);
258 if (r)
259 return r;
260
261 r = amdgpu_vcn_suspend(adev);
262
263 return r;
264}
265
266/**
267 * vcn_v1_0_resume - resume VCN block
268 *
269 * @handle: amdgpu_device pointer
270 *
271 * Resume firmware and hw init VCN block
272 */
273static int vcn_v1_0_resume(void *handle)
274{
275 int r;
276 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
277
278 r = amdgpu_vcn_resume(adev);
279 if (r)
280 return r;
281
282 r = vcn_v1_0_hw_init(adev);
283
284 return r;
285}
286
287/**
288 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
289 *
290 * @adev: amdgpu_device pointer
291 *
 * Let the VCN memory controller know its offsets
293 */
294static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
295{
296 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
297 uint32_t offset;
298
299 /* cache window 0: fw */
300 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
301 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
302 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
303 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
304 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
305 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
306 offset = 0;
307 } else {
308 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
309 lower_32_bits(adev->vcn.inst->gpu_addr));
310 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
311 upper_32_bits(adev->vcn.inst->gpu_addr));
312 offset = size;
313 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
314 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
315 }
316
317 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
318
319 /* cache window 1: stack */
320 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
321 lower_32_bits(adev->vcn.inst->gpu_addr + offset));
322 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
323 upper_32_bits(adev->vcn.inst->gpu_addr + offset));
324 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
325 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
326
327 /* cache window 2: context */
328 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
329 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
330 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
331 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
332 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
333 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
334
335 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
336 adev->gfx.config.gb_addr_config);
337 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
338 adev->gfx.config.gb_addr_config);
339 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
340 adev->gfx.config.gb_addr_config);
341 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
342 adev->gfx.config.gb_addr_config);
343 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
344 adev->gfx.config.gb_addr_config);
345 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
346 adev->gfx.config.gb_addr_config);
347 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
348 adev->gfx.config.gb_addr_config);
349 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
350 adev->gfx.config.gb_addr_config);
351 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
352 adev->gfx.config.gb_addr_config);
353 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
354 adev->gfx.config.gb_addr_config);
355 WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
356 adev->gfx.config.gb_addr_config);
357 WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
358 adev->gfx.config.gb_addr_config);
359}
360
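/**
 * vcn_v1_0_mc_resume_dpg_mode - memory controller programming in DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Program the same firmware, stack and context cache windows and the
 * global tiling registers as vcn_v1_0_mc_resume_spg_mode(), but through
 * the indirect DPG register write path.
 */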
361static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
362{
363 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
364 uint32_t offset;
365
366 /* cache window 0: fw */
367 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
368 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
369 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
370 0xFFFFFFFF, 0);
371 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
372 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
373 0xFFFFFFFF, 0);
374 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
375 0xFFFFFFFF, 0);
376 offset = 0;
377 } else {
378 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
379 lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
380 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
381 upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
382 offset = size;
383 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
384 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
385 }
386
387 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
388
389 /* cache window 1: stack */
390 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
391 lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
392 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
393 upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
394 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
395 0xFFFFFFFF, 0);
396 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
397 0xFFFFFFFF, 0);
398
399 /* cache window 2: context */
400 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
401 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
402 0xFFFFFFFF, 0);
403 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
404 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
405 0xFFFFFFFF, 0);
406 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
407 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
408 0xFFFFFFFF, 0);
409
410 /* VCN global tiling registers */
411 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
412 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
413 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
414 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
415 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
416 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
417 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
418 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
419 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
420 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
421 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
422 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
423 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
424 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
425 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
426 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
427 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
428 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
429 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
430 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
431}
432
433/**
434 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
435 *
436 * @adev: amdgpu_device pointer
437 *
438 * Disable clock gating for VCN block
439 */
440static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
441{
442 uint32_t data;
443
444 /* JPEG disable CGC */
445 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
446
447 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
448 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
449 else
450 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
451
452 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
453 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
454 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
455
456 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
457 data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
458 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
459
460 /* UVD disable CGC */
461 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
462 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
463 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
464 else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
466
467 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
468 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
469 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
470
471 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
472 data &= ~(UVD_CGC_GATE__SYS_MASK
473 | UVD_CGC_GATE__UDEC_MASK
474 | UVD_CGC_GATE__MPEG2_MASK
475 | UVD_CGC_GATE__REGS_MASK
476 | UVD_CGC_GATE__RBC_MASK
477 | UVD_CGC_GATE__LMI_MC_MASK
478 | UVD_CGC_GATE__LMI_UMC_MASK
479 | UVD_CGC_GATE__IDCT_MASK
480 | UVD_CGC_GATE__MPRD_MASK
481 | UVD_CGC_GATE__MPC_MASK
482 | UVD_CGC_GATE__LBSI_MASK
483 | UVD_CGC_GATE__LRBBM_MASK
484 | UVD_CGC_GATE__UDEC_RE_MASK
485 | UVD_CGC_GATE__UDEC_CM_MASK
486 | UVD_CGC_GATE__UDEC_IT_MASK
487 | UVD_CGC_GATE__UDEC_DB_MASK
488 | UVD_CGC_GATE__UDEC_MP_MASK
489 | UVD_CGC_GATE__WCB_MASK
490 | UVD_CGC_GATE__VCPU_MASK
491 | UVD_CGC_GATE__SCPU_MASK);
492 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
493
494 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
495 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
496 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
497 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
498 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
499 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
500 | UVD_CGC_CTRL__SYS_MODE_MASK
501 | UVD_CGC_CTRL__UDEC_MODE_MASK
502 | UVD_CGC_CTRL__MPEG2_MODE_MASK
503 | UVD_CGC_CTRL__REGS_MODE_MASK
504 | UVD_CGC_CTRL__RBC_MODE_MASK
505 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
506 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
507 | UVD_CGC_CTRL__IDCT_MODE_MASK
508 | UVD_CGC_CTRL__MPRD_MODE_MASK
509 | UVD_CGC_CTRL__MPC_MODE_MASK
510 | UVD_CGC_CTRL__LBSI_MODE_MASK
511 | UVD_CGC_CTRL__LRBBM_MODE_MASK
512 | UVD_CGC_CTRL__WCB_MODE_MASK
513 | UVD_CGC_CTRL__VCPU_MODE_MASK
514 | UVD_CGC_CTRL__SCPU_MODE_MASK);
515 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
516
517 /* turn on */
518 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
519 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
520 | UVD_SUVD_CGC_GATE__SIT_MASK
521 | UVD_SUVD_CGC_GATE__SMP_MASK
522 | UVD_SUVD_CGC_GATE__SCM_MASK
523 | UVD_SUVD_CGC_GATE__SDB_MASK
524 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
525 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
526 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
527 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
528 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
529 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
530 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
531 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
532 | UVD_SUVD_CGC_GATE__SCLR_MASK
533 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
534 | UVD_SUVD_CGC_GATE__ENT_MASK
535 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
536 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
537 | UVD_SUVD_CGC_GATE__SITE_MASK
538 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
539 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
540 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
541 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
542 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
543 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
544
545 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
546 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
547 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
548 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
549 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
550 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
551 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
552 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
553 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
554 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
555 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
556 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
557}
558
559/**
560 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
561 *
562 * @adev: amdgpu_device pointer
563 *
564 * Enable clock gating for VCN block
565 */
566static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
567{
568 uint32_t data = 0;
569
570 /* enable JPEG CGC */
571 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
572 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
573 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
574 else
575 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
576 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
577 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
578 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
579
580 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
581 data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
582 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
583
584 /* enable UVD CGC */
585 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
586 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
587 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
588 else
589 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
590 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
591 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
592 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
593
594 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
595 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
596 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
597 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
598 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
599 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
600 | UVD_CGC_CTRL__SYS_MODE_MASK
601 | UVD_CGC_CTRL__UDEC_MODE_MASK
602 | UVD_CGC_CTRL__MPEG2_MODE_MASK
603 | UVD_CGC_CTRL__REGS_MODE_MASK
604 | UVD_CGC_CTRL__RBC_MODE_MASK
605 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
606 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
607 | UVD_CGC_CTRL__IDCT_MODE_MASK
608 | UVD_CGC_CTRL__MPRD_MODE_MASK
609 | UVD_CGC_CTRL__MPC_MODE_MASK
610 | UVD_CGC_CTRL__LBSI_MODE_MASK
611 | UVD_CGC_CTRL__LRBBM_MODE_MASK
612 | UVD_CGC_CTRL__WCB_MODE_MASK
613 | UVD_CGC_CTRL__VCPU_MODE_MASK
614 | UVD_CGC_CTRL__SCPU_MODE_MASK);
615 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
616
617 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
618 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
619 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
620 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
621 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
622 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
623 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
624 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
625 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
626 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
627 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
628 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
629}
630
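/*
 * Program VCN clock gating through the DPG indirect register interface.
 * The sram_sel argument selects the DPG write mode: 0 appears to apply the
 * write immediately, while 1 appears to stage it in the DPG SRAM so it is
 * replayed when the block is re-powered (assumption based on how the two
 * values are used in vcn_v1_0_start_dpg_mode()).
 */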
631static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
632{
633 uint32_t reg_data = 0;
634
635 /* disable JPEG CGC */
636 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
637 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
638 else
639 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
640 reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
641 reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
642 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
643
644 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
645
646 /* enable sw clock gating control */
647 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
648 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
649 else
650 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
651 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
652 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
653 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
654 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
655 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
656 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
657 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
658 UVD_CGC_CTRL__SYS_MODE_MASK |
659 UVD_CGC_CTRL__UDEC_MODE_MASK |
660 UVD_CGC_CTRL__MPEG2_MODE_MASK |
661 UVD_CGC_CTRL__REGS_MODE_MASK |
662 UVD_CGC_CTRL__RBC_MODE_MASK |
663 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
664 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
665 UVD_CGC_CTRL__IDCT_MODE_MASK |
666 UVD_CGC_CTRL__MPRD_MODE_MASK |
667 UVD_CGC_CTRL__MPC_MODE_MASK |
668 UVD_CGC_CTRL__LBSI_MODE_MASK |
669 UVD_CGC_CTRL__LRBBM_MODE_MASK |
670 UVD_CGC_CTRL__WCB_MODE_MASK |
671 UVD_CGC_CTRL__VCPU_MODE_MASK |
672 UVD_CGC_CTRL__SCPU_MODE_MASK);
673 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
674
675 /* turn off clock gating */
676 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
677
678 /* turn on SUVD clock gating */
679 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
680
681 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
682 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
683}
684
685static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
686{
687 uint32_t data = 0;
688
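	/*
	 * Each *_PWR_CONFIG field below requests a power state for one VCN
	 * tile from the power gating FSM (1 appears to request power-up,
	 * 2 power-down); UVD_PGFSM_STATUS is then polled until the FSM
	 * reports the requested state.
	 */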
689 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
690 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
691 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
692 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
693 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
694 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
695 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
696 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
697 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
698 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
699 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
700 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
701
702 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
703 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
704 } else {
705 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
706 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
707 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
708 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
709 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
710 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
711 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
712 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
713 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
714 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
715 | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
716 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
717 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
718 }
719
720	/* poll UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (powered on) */
721
722 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
723 data &= ~0x103;
724 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
725 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
726
727 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
728}
729
730static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
731{
732 uint32_t data = 0;
733
734 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
735 /* Before power off, this indicator has to be turned on */
736 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
737 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
738 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
739 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
740
741
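		/*
		 * Request power-down for every tile (see the note in
		 * vcn_1_0_disable_static_power_gating()), then wait until
		 * UVD_PGFSM_STATUS reports the matching pattern.
		 */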
742 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
743 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
744 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
745 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
746 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
747 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
748 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
749 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
750 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
751 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
752 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
753
754 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
755
756 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
757 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
758 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
759 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
760 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
761 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
762 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
763 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
764 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
765 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
766 | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
767 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
768 }
769}
770
771/**
772 * vcn_v1_0_start_spg_mode - start VCN block
773 *
774 * @adev: amdgpu_device pointer
775 *
776 * Setup and start the VCN block
777 */
778static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
779{
780 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
781 uint32_t rb_bufsz, tmp;
782 uint32_t lmi_swap_cntl;
783 int i, j, r;
784
785 /* disable byte swapping */
786 lmi_swap_cntl = 0;
787
788 vcn_1_0_disable_static_power_gating(adev);
789
790 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
791 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
792
793 /* disable clock gating */
794 vcn_v1_0_disable_clock_gating(adev);
795
796	/* disable interrupt */
797 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
798 ~UVD_MASTINT_EN__VCPU_EN_MASK);
799
800 /* initialize VCN memory controller */
801 tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
802 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
803 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
804 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
805 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
806 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
807
808#ifdef __BIG_ENDIAN
809 /* swap (8 in 32) RB and IB */
810 lmi_swap_cntl = 0xa;
811#endif
812 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
813
814 tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
815 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
816 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
817 WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
818
819 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
820 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
821 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
822 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
823 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
824
825 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
826 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
827 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
828 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
829 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
830
831 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
832 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
833 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
834 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
835
836 vcn_v1_0_mc_resume_spg_mode(adev);
837
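	/*
	 * The two writes below appear to enable the engine's IB register
	 * access checking (assumption based on the register names).
	 */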
838 WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
839 WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
840 RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
841
842 /* enable VCPU clock */
843 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
844
845 /* boot up the VCPU */
846 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
847 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
848
849 /* enable UMC */
850 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
851 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
852
853 tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
854 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
855 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
856 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
857
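	/*
	 * Poll UVD_STATUS for the idle bit (up to 100 * 10ms per attempt);
	 * if the firmware never responds, toggle the VCPU soft reset and
	 * retry, giving up after 10 attempts.
	 */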
858 for (i = 0; i < 10; ++i) {
859 uint32_t status;
860
861 for (j = 0; j < 100; ++j) {
862 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
863 if (status & UVD_STATUS__IDLE)
864 break;
865 mdelay(10);
866 }
867 r = 0;
868 if (status & UVD_STATUS__IDLE)
869 break;
870
871 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
872 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
873 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
874 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
875 mdelay(10);
876 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
877 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
878 mdelay(10);
879 r = -1;
880 }
881
882 if (r) {
883 DRM_ERROR("VCN decode not responding, giving up!!!\n");
884 return r;
885 }
886 /* enable master interrupt */
887 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
888 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
889
890	/* enable system interrupt for JRBC, TODO: move to set interrupt */
891 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
892 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
893 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
894
895 /* clear the busy bit of UVD_STATUS */
896 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
897 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
898
899 /* force RBC into idle state */
900 rb_bufsz = order_base_2(ring->ring_size);
901 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
902 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
903 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
904 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
905 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
906 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
907
908 /* set the write pointer delay */
909 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
910
911 /* set the wb address */
912 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
913 (upper_32_bits(ring->gpu_addr) >> 2));
914
915 /* program the RB_BASE for ring buffer */
916 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
917 lower_32_bits(ring->gpu_addr));
918 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
919 upper_32_bits(ring->gpu_addr));
920
921 /* Initialize the ring buffer's read and write pointers */
922 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
923
924 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
925
926 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
927 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
928 lower_32_bits(ring->wptr));
929
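	/* the ring is programmed, clear RB_NO_FETCH so the RBC starts fetching */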
930 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
931 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
932
933 ring = &adev->vcn.inst->ring_enc[0];
934 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
935 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
936 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
937 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
938 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
939
940 ring = &adev->vcn.inst->ring_enc[1];
941 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
942 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
943 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
944 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
945 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
946
947 jpeg_v1_0_start(adev, 0);
948
949 return 0;
950}
951
952static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
953{
954 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
955 uint32_t rb_bufsz, tmp;
956 uint32_t lmi_swap_cntl;
957
958 /* disable byte swapping */
959 lmi_swap_cntl = 0;
960
961 vcn_1_0_enable_static_power_gating(adev);
962
963 /* enable dynamic power gating mode */
964 tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
965 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
966 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
967 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
968
969 /* enable clock gating */
970 vcn_v1_0_clock_gating_dpg_mode(adev, 0);
971
972 /* enable VCPU clock */
973 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
974 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
975 tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
976 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
977
978	/* disable interrupt */
979 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
980 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
981
982 /* initialize VCN memory controller */
983 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
984 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
985 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
986 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
987 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
988 UVD_LMI_CTRL__REQ_MODE_MASK |
989 UVD_LMI_CTRL__CRC_RESET_MASK |
990 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
991 0x00100000L, 0xFFFFFFFF, 0);
992
993#ifdef __BIG_ENDIAN
994 /* swap (8 in 32) RB and IB */
995 lmi_swap_cntl = 0xa;
996#endif
997 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
998
999 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
1000 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
1001
1002 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
1003 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1004 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1005 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1006 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
1007
1008 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
1009 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1010 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1011 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1012 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
1013
1014 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
1015 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1016 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1017 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
1018
1019 vcn_v1_0_mc_resume_dpg_mode(adev);
1020
1021 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
1022 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
1023
1024 /* boot up the VCPU */
1025 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1026
1027 /* enable UMC */
1028 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
1029 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
1030 0xFFFFFFFF, 0);
1031
1032 /* enable master interrupt */
1033 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
1034 UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1035
1036 vcn_v1_0_clock_gating_dpg_mode(adev, 1);
1037 /* setup mmUVD_LMI_CTRL */
1038 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
1039 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1040 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1041 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1042 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1043 UVD_LMI_CTRL__REQ_MODE_MASK |
1044 UVD_LMI_CTRL__CRC_RESET_MASK |
1045 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1046 0x00100000L, 0xFFFFFFFF, 1);
1047
1048 tmp = adev->gfx.config.gb_addr_config;
1049 /* setup VCN global tiling registers */
1050 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1051 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1052
1053 /* enable System Interrupt for JRBC */
1054 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
1055 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1056
1057 /* force RBC into idle state */
1058 rb_bufsz = order_base_2(ring->ring_size);
1059 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1060 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1061 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1062 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1063 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1064 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1065
1066 /* set the write pointer delay */
1067 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1068
1069 /* set the wb address */
1070 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1071 (upper_32_bits(ring->gpu_addr) >> 2));
1072
1073 /* program the RB_BASE for ring buffer */
1074 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1075 lower_32_bits(ring->gpu_addr));
1076 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1077 upper_32_bits(ring->gpu_addr));
1078
1079 /* Initialize the ring buffer's read and write pointers */
1080 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1081
1082 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
1083
1084 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1085 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1086 lower_32_bits(ring->wptr));
1087
1088 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1089 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1090
1091 jpeg_v1_0_start(adev, 1);
1092
1093 return 0;
1094}
1095
1096static int vcn_v1_0_start(struct amdgpu_device *adev)
1097{
1098 int r;
1099
1100 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1101 r = vcn_v1_0_start_dpg_mode(adev);
1102 else
1103 r = vcn_v1_0_start_spg_mode(adev);
1104 return r;
1105}
1106
1107/**
1108 * vcn_v1_0_stop_spg_mode - stop VCN block
1109 *
1110 * @adev: amdgpu_device pointer
1111 *
1112 * Stop the VCN block
1113 */
1114static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
1115{
1116 int tmp;
1117
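	/* wait for the firmware to report idle before tearing the block down */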
1118 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1119
1120 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1121 UVD_LMI_STATUS__READ_CLEAN_MASK |
1122 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1123 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1124 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1125
1126 /* stall UMC channel */
1127 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
1128 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1129 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1130
1131 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1132 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1133 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1134
1135 /* disable VCPU clock */
1136 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
1137 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1138
1139 /* reset LMI UMC/LMI */
1140 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1141 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
1142 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1143
1144 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1145 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
1146 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
1147
1148 /* put VCPU into reset */
1149 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1150 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1151 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1152
1153 WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
1154
1155 vcn_v1_0_enable_clock_gating(adev);
1156 vcn_1_0_enable_static_power_gating(adev);
1157 return 0;
1158}
1159
1160static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
1161{
1162 uint32_t tmp;
1163
1164 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
1165 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1166 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1167 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1168
1169 /* wait for read ptr to be equal to write ptr */
1170 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1171 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1172
1173 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1174 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1175
1176 tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1177 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);
1178
1179 tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1180 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1181
1182 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1183 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1184 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1185
1186 /* disable dynamic power gating mode */
1187 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1188 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1189
1190 return 0;
1191}
1192
1193static int vcn_v1_0_stop(struct amdgpu_device *adev)
1194{
1195 int r;
1196
1197 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1198 r = vcn_v1_0_stop_dpg_mode(adev);
1199 else
1200 r = vcn_v1_0_stop_spg_mode(adev);
1201
1202 return r;
1203}
1204
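/*
 * Pause or unpause the non-JPEG and JPEG DPG domains.  A pause is requested
 * through UVD_DPG_PAUSE and acknowledged via the matching *_PAUSE_DPG_ACK
 * bit; after the ACK the ring registers are reprogrammed ("Restore"),
 * presumably because their state is not retained while the block is
 * dynamically power gated, and the decode write pointer is recovered from
 * its UVD_SCRATCH2 shadow.
 */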
1205static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
1206 int inst_idx, struct dpg_pause_state *new_state)
1207{
1208 int ret_code;
1209 uint32_t reg_data = 0;
1210 uint32_t reg_data2 = 0;
1211 struct amdgpu_ring *ring;
1212
1213 /* pause/unpause if state is changed */
1214 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1215 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1216 adev->vcn.inst[inst_idx].pause_state.fw_based,
1217 adev->vcn.inst[inst_idx].pause_state.jpeg,
1218 new_state->fw_based, new_state->jpeg);
1219
1220 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1221 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1222
1223 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1224 ret_code = 0;
1225
1226 if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
1227 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1228 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1229 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1230
1231 if (!ret_code) {
1232 /* pause DPG non-jpeg */
1233 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1234 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1235 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1236 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1237 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1238
1239 /* Restore */
1240 ring = &adev->vcn.inst->ring_enc[0];
1241 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1242 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1243 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1244 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1245 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1246
1247 ring = &adev->vcn.inst->ring_enc[1];
1248 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1249 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1250 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1251 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1252 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1253
1254 ring = &adev->vcn.inst->ring_dec;
1255 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1256 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1257 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1258 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1259 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1260 }
1261 } else {
1262 /* unpause dpg non-jpeg, no need to wait */
1263 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1264 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1265 }
1266 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1267 }
1268
1269 /* pause/unpause if state is changed */
1270 if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
1271 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1272 adev->vcn.inst[inst_idx].pause_state.fw_based,
1273 adev->vcn.inst[inst_idx].pause_state.jpeg,
1274 new_state->fw_based, new_state->jpeg);
1275
1276 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1277 (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1278
1279 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
1280 ret_code = 0;
1281
1282 if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
1283 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1284 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1285 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1286
1287 if (!ret_code) {
1288	/* Make sure JRBC snoop is disabled before sending the pause */
1289 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
1290 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
1291 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
1292
1293 /* pause DPG jpeg */
1294 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1295 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1296 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1297 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
1298 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1299
1300 /* Restore */
1301 ring = &adev->jpeg.inst->ring_dec;
1302 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
1303 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1304 UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
1305 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1306 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
1307 lower_32_bits(ring->gpu_addr));
1308 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
1309 upper_32_bits(ring->gpu_addr));
1310 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
1311 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
1312 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1313 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1314
1315 ring = &adev->vcn.inst->ring_dec;
1316 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1317 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1318 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1319 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1320 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1321 }
1322 } else {
1323 /* unpause dpg jpeg, no need to wait */
1324 reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1325 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1326 }
1327 adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
1328 }
1329
1330 return 0;
1331}
1332
1333static bool vcn_v1_0_is_idle(void *handle)
1334{
1335 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1336
1337 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
1338}
1339
1340static int vcn_v1_0_wait_for_idle(void *handle)
1341{
1342 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1343 int ret;
1344
1345 ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
1346 UVD_STATUS__IDLE);
1347
1348 return ret;
1349}
1350
1351static int vcn_v1_0_set_clockgating_state(void *handle,
1352 enum amd_clockgating_state state)
1353{
1354 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1355 bool enable = (state == AMD_CG_STATE_GATE);
1356
1357 if (enable) {
1358 /* wait for STATUS to clear */
1359 if (!vcn_v1_0_is_idle(handle))
1360 return -EBUSY;
1361 vcn_v1_0_enable_clock_gating(adev);
1362 } else {
1363	/* disable HW gating and enable SW gating */
1364 vcn_v1_0_disable_clock_gating(adev);
1365 }
1366 return 0;
1367}
1368
1369/**
1370 * vcn_v1_0_dec_ring_get_rptr - get read pointer
1371 *
1372 * @ring: amdgpu_ring pointer
1373 *
1374 * Returns the current hardware read pointer
1375 */
1376static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1377{
1378 struct amdgpu_device *adev = ring->adev;
1379
1380 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1381}
1382
1383/**
1384 * vcn_v1_0_dec_ring_get_wptr - get write pointer
1385 *
1386 * @ring: amdgpu_ring pointer
1387 *
1388 * Returns the current hardware write pointer
1389 */
1390static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1391{
1392 struct amdgpu_device *adev = ring->adev;
1393
1394 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1395}
1396
1397/**
1398 * vcn_v1_0_dec_ring_set_wptr - set write pointer
1399 *
1400 * @ring: amdgpu_ring pointer
1401 *
1402 * Commits the write pointer to the hardware
1403 */
1404static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1405{
1406 struct amdgpu_device *adev = ring->adev;
1407
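	/*
	 * In DPG mode the write pointer is also shadowed in UVD_SCRATCH2
	 * (with bit 31 set, apparently as a marker) so that it can be
	 * restored after a DPG pause, see vcn_v1_0_pause_dpg_mode().
	 */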
1408 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1409 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1410 lower_32_bits(ring->wptr) | 0x80000000);
1411
1412 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1413}
1414
1415/**
1416 * vcn_v1_0_dec_ring_insert_start - insert a start command
1417 *
1418 * @ring: amdgpu_ring pointer
1419 *
1420 * Write a start command to the ring.
1421 */
1422static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1423{
1424 struct amdgpu_device *adev = ring->adev;
1425
1426 amdgpu_ring_write(ring,
1427 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1428 amdgpu_ring_write(ring, 0);
1429 amdgpu_ring_write(ring,
1430 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1431 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
1432}
1433
1434/**
1435 * vcn_v1_0_dec_ring_insert_end - insert an end command
1436 *
1437 * @ring: amdgpu_ring pointer
1438 *
1439 * Write an end command to the ring.
1440 */
1441static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1442{
1443 struct amdgpu_device *adev = ring->adev;
1444
1445 amdgpu_ring_write(ring,
1446 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1447 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
1448}
1449
1450/**
1451 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
1452 *
1453 * @ring: amdgpu_ring pointer
1454 * @addr: address
1455 * @seq: sequence number
1456 * @flags: fence related flags
1457 *
1458 * Write a fence and a trap command to the ring.
1459 */
1460static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1461 unsigned flags)
1462{
1463 struct amdgpu_device *adev = ring->adev;
1464
1465 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1466
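	/* write the sequence number and fence address, issue a FENCE command,
	 * then a TRAP command to raise the completion interrupt
	 */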
1467 amdgpu_ring_write(ring,
1468 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1469 amdgpu_ring_write(ring, seq);
1470 amdgpu_ring_write(ring,
1471 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1472 amdgpu_ring_write(ring, addr & 0xffffffff);
1473 amdgpu_ring_write(ring,
1474 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1475 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1476 amdgpu_ring_write(ring,
1477 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1478 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
1479
1480 amdgpu_ring_write(ring,
1481 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1482 amdgpu_ring_write(ring, 0);
1483 amdgpu_ring_write(ring,
1484 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1485 amdgpu_ring_write(ring, 0);
1486 amdgpu_ring_write(ring,
1487 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1488 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
1489}
1490
1491/**
1492 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
1493 *
1494 * @ring: amdgpu_ring pointer
1495 * @job: job to retrieve vmid from
1496 * @ib: indirect buffer to execute
1497 * @flags: unused
1498 *
1499 * Write ring commands to execute the indirect buffer
1500 */
1501static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1502 struct amdgpu_job *job,
1503 struct amdgpu_ib *ib,
1504 uint32_t flags)
1505{
1506 struct amdgpu_device *adev = ring->adev;
1507 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1508
1509 amdgpu_ring_write(ring,
1510 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1511 amdgpu_ring_write(ring, vmid);
1512
1513 amdgpu_ring_write(ring,
1514 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1515 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1516 amdgpu_ring_write(ring,
1517 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1518 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1519 amdgpu_ring_write(ring,
1520 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1521 amdgpu_ring_write(ring, ib->length_dw);
1522}
1523
1524static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1525 uint32_t reg, uint32_t val,
1526 uint32_t mask)
1527{
1528 struct amdgpu_device *adev = ring->adev;
1529
1530 amdgpu_ring_write(ring,
1531 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1532 amdgpu_ring_write(ring, reg << 2);
1533 amdgpu_ring_write(ring,
1534 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1535 amdgpu_ring_write(ring, val);
1536 amdgpu_ring_write(ring,
1537 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1538 amdgpu_ring_write(ring, mask);
1539 amdgpu_ring_write(ring,
1540 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1541 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1542}
1543
1544static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1545 unsigned vmid, uint64_t pd_addr)
1546{
1547 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1548 uint32_t data0, data1, mask;
1549
1550 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1551
1552 /* wait for register write */
1553 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1554 data1 = lower_32_bits(pd_addr);
1555 mask = 0xffffffff;
1556 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1557}
1558
1559static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1560 uint32_t reg, uint32_t val)
1561{
1562 struct amdgpu_device *adev = ring->adev;
1563
1564 amdgpu_ring_write(ring,
1565 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1566 amdgpu_ring_write(ring, reg << 2);
1567 amdgpu_ring_write(ring,
1568 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1569 amdgpu_ring_write(ring, val);
1570 amdgpu_ring_write(ring,
1571 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1572 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1573}
1574
1575/**
1576 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1577 *
1578 * @ring: amdgpu_ring pointer
1579 *
1580 * Returns the current hardware enc read pointer
1581 */
1582static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1583{
1584 struct amdgpu_device *adev = ring->adev;
1585
1586 if (ring == &adev->vcn.inst->ring_enc[0])
1587 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1588 else
1589 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1590}
1591
1592 /**
1593 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1594 *
1595 * @ring: amdgpu_ring pointer
1596 *
1597 * Returns the current hardware enc write pointer
1598 */
1599static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1600{
1601 struct amdgpu_device *adev = ring->adev;
1602
1603 if (ring == &adev->vcn.inst->ring_enc[0])
1604 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1605 else
1606 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1607}
1608
1609 /**
1610 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1611 *
1612 * @ring: amdgpu_ring pointer
1613 *
1614 * Commits the enc write pointer to the hardware
1615 */
1616static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1617{
1618 struct amdgpu_device *adev = ring->adev;
1619
1620 if (ring == &adev->vcn.inst->ring_enc[0])
1621 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1622 lower_32_bits(ring->wptr));
1623 else
1624 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1625 lower_32_bits(ring->wptr));
1626}
1627
1628/**
1629 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1630 *
1631 * @ring: amdgpu_ring pointer
1632 * @addr: address
1633 * @seq: sequence number
1634 * @flags: fence related flags
1635 *
1636 * Write an enc fence and a trap command to the ring.
1637 */
1638static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1639 u64 seq, unsigned flags)
1640{
1641 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1642
1643 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1644 amdgpu_ring_write(ring, addr);
1645 amdgpu_ring_write(ring, upper_32_bits(addr));
1646 amdgpu_ring_write(ring, seq);
1647 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1648}
1649
1650static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1651{
1652 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1653}
1654
1655/**
1656 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1657 *
1658 * @ring: amdgpu_ring pointer
1659 * @job: job to retrieve vmid from
1660 * @ib: indirect buffer to execute
1661 * @flags: unused
1662 *
1663 * Write enc ring commands to execute the indirect buffer
1664 */
1665static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1666 struct amdgpu_job *job,
1667 struct amdgpu_ib *ib,
1668 uint32_t flags)
1669{
1670 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1671
1672 amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1673 amdgpu_ring_write(ring, vmid);
1674 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1675 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1676 amdgpu_ring_write(ring, ib->length_dw);
1677}
1678
1679static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1680 uint32_t reg, uint32_t val,
1681 uint32_t mask)
1682{
1683 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1684 amdgpu_ring_write(ring, reg << 2);
1685 amdgpu_ring_write(ring, mask);
1686 amdgpu_ring_write(ring, val);
1687}
1688
1689static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1690 unsigned int vmid, uint64_t pd_addr)
1691{
1692 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1693
1694 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1695
1696 /* wait for reg writes */
1697 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1698 vmid * hub->ctx_addr_distance,
1699 lower_32_bits(pd_addr), 0xffffffff);
1700}
1701
1702static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1703 uint32_t reg, uint32_t val)
1704{
1705 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1706 amdgpu_ring_write(ring, reg << 2);
1707 amdgpu_ring_write(ring, val);
1708}
1709
1710static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
1711 struct amdgpu_irq_src *source,
1712 unsigned type,
1713 enum amdgpu_interrupt_state state)
1714{
1715 return 0;
1716}
1717
1718static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
1719 struct amdgpu_irq_src *source,
1720 struct amdgpu_iv_entry *entry)
1721{
1722 DRM_DEBUG("IH: VCN TRAP\n");
1723
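	/* 124 is the decode ring trap, 119 and 120 are the two encode ring traps */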
1724 switch (entry->src_id) {
1725 case 124:
1726 amdgpu_fence_process(&adev->vcn.inst->ring_dec);
1727 break;
1728 case 119:
1729 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
1730 break;
1731 case 120:
1732 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
1733 break;
1734 default:
1735 DRM_ERROR("Unhandled interrupt: %d %d\n",
1736 entry->src_id, entry->src_data[0]);
1737 break;
1738 }
1739
1740 return 0;
1741}
1742
1743static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1744{
1745 struct amdgpu_device *adev = ring->adev;
1746 int i;
1747
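	/* NOPs are emitted as UVD_NO_OP register writes, two dwords each,
	 * so both the write pointer and the count must be even
	 */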
1748 WARN_ON(ring->wptr % 2 || count % 2);
1749
1750 for (i = 0; i < count / 2; i++) {
1751 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1752 amdgpu_ring_write(ring, 0);
1753 }
1754}
1755
1756static int vcn_v1_0_set_powergating_state(void *handle,
1757 enum amd_powergating_state state)
1758{
1759 /* This doesn't actually powergate the VCN block.
1760 * That's done in the dpm code via the SMC. This
1761 * just re-inits the block as necessary. The actual
1762 * gating still happens in the dpm code. We should
1763 * revisit this when there is a cleaner line between
1764 * the smc and the hw blocks
1765 */
1766 int ret;
1767 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1768
1769	if (state == adev->vcn.cur_state)
1770 return 0;
1771
1772 if (state == AMD_PG_STATE_GATE)
1773 ret = vcn_v1_0_stop(adev);
1774 else
1775 ret = vcn_v1_0_start(adev);
1776
1777	if (!ret)
1778 adev->vcn.cur_state = state;
1779 return ret;
1780}
1781
1782static void vcn_v1_0_idle_work_handler(struct work_struct *work)
1783{
1784 struct amdgpu_device *adev =
1785 container_of(work, struct amdgpu_device, vcn.idle_work.work);
1786 unsigned int fences = 0, i;
1787
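	/* encode fences decide the non-JPEG DPG pause state; decode and JPEG
	 * fences are added further down so the block is only power gated once
	 * everything is idle
	 */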
1788 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1789 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1790
1791 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1792 struct dpg_pause_state new_state;
1793
1794 if (fences)
1795 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1796 else
1797 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1798
1799 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
1800 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1801 else
1802 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1803
1804 adev->vcn.pause_dpg_mode(adev, 0, &new_state);
1805 }
1806
1807 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
1808 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
1809
1810 if (fences == 0) {
1811 amdgpu_gfx_off_ctrl(adev, true);
1812 if (adev->pm.dpm_enabled)
1813 amdgpu_dpm_enable_uvd(adev, false);
1814 else
1815 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1816 AMD_PG_STATE_GATE);
1817 } else {
1818 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
1819 }
1820}
1821
1822static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
1823{
1824 struct amdgpu_device *adev = ring->adev;
1825 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
1826
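	/*
	 * VCN 1.0 and JPEG 1.0 share hardware, so ring use is serialized
	 * behind vcn1_jpeg1_workaround and the JPEG decode ring is drained
	 * before this ring runs (assumption based on the workaround's name).
	 */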
1827 mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
1828
1829 if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
1830 DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
1831
1832 vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
1833
1834}
1835
1836void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
1837{
1838 struct amdgpu_device *adev = ring->adev;
1839
1840 if (set_clocks) {
1841 amdgpu_gfx_off_ctrl(adev, false);
1842 if (adev->pm.dpm_enabled)
1843 amdgpu_dpm_enable_uvd(adev, true);
1844 else
1845 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1846 AMD_PG_STATE_UNGATE);
1847 }
1848
1849 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1850 struct dpg_pause_state new_state;
1851 unsigned int fences = 0, i;
1852
1853 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1854 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1855
1856 if (fences)
1857 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1858 else
1859 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1860
1861 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
1862 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1863 else
1864 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1865
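		/* regardless of the fence counts, the domain of the ring about
		 * to be used must stay paused
		 */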
1866 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
1867 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1868 else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
1869 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1870
1871 adev->vcn.pause_dpg_mode(adev, 0, &new_state);
1872 }
1873}
1874
1875void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
1876{
1877 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
1878 mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
1879}
1880
1881static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
1882 .name = "vcn_v1_0",
1883 .early_init = vcn_v1_0_early_init,
1884 .late_init = NULL,
1885 .sw_init = vcn_v1_0_sw_init,
1886 .sw_fini = vcn_v1_0_sw_fini,
1887 .hw_init = vcn_v1_0_hw_init,
1888 .hw_fini = vcn_v1_0_hw_fini,
1889 .suspend = vcn_v1_0_suspend,
1890 .resume = vcn_v1_0_resume,
1891 .is_idle = vcn_v1_0_is_idle,
1892 .wait_for_idle = vcn_v1_0_wait_for_idle,
1893 .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
1894 .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
1895 .soft_reset = NULL /* vcn_v1_0_soft_reset */,
1896 .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
1897 .set_clockgating_state = vcn_v1_0_set_clockgating_state,
1898 .set_powergating_state = vcn_v1_0_set_powergating_state,
1899};
1900
1901static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1902 .type = AMDGPU_RING_TYPE_VCN_DEC,
1903 .align_mask = 0xf,
1904 .support_64bit_ptrs = false,
1905 .no_user_fence = true,
1906 .vmhub = AMDGPU_MMHUB_0,
1907 .get_rptr = vcn_v1_0_dec_ring_get_rptr,
1908 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
1909 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
1910 .emit_frame_size =
1911 6 + 6 + /* hdp invalidate / flush */
1912 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1913 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1914 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
1915 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
1916 6,
1917 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
1918 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
1919 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
1920 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
1921 .test_ring = amdgpu_vcn_dec_ring_test_ring,
1922 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1923 .insert_nop = vcn_v1_0_dec_ring_insert_nop,
1924 .insert_start = vcn_v1_0_dec_ring_insert_start,
1925 .insert_end = vcn_v1_0_dec_ring_insert_end,
1926 .pad_ib = amdgpu_ring_generic_pad_ib,
1927 .begin_use = vcn_v1_0_ring_begin_use,
1928 .end_use = vcn_v1_0_ring_end_use,
1929 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
1930 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
1931 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1932};
1933
1934static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1935 .type = AMDGPU_RING_TYPE_VCN_ENC,
1936 .align_mask = 0x3f,
1937 .nop = VCN_ENC_CMD_NO_OP,
1938 .support_64bit_ptrs = false,
1939 .no_user_fence = true,
1940 .vmhub = AMDGPU_MMHUB_0,
1941 .get_rptr = vcn_v1_0_enc_ring_get_rptr,
1942 .get_wptr = vcn_v1_0_enc_ring_get_wptr,
1943 .set_wptr = vcn_v1_0_enc_ring_set_wptr,
1944 .emit_frame_size =
1945 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1946 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1947 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
1948 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
1949 1, /* vcn_v1_0_enc_ring_insert_end */
1950 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
1951 .emit_ib = vcn_v1_0_enc_ring_emit_ib,
1952 .emit_fence = vcn_v1_0_enc_ring_emit_fence,
1953 .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
1954 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1955 .test_ib = amdgpu_vcn_enc_ring_test_ib,
1956 .insert_nop = amdgpu_ring_insert_nop,
1957 .insert_end = vcn_v1_0_enc_ring_insert_end,
1958 .pad_ib = amdgpu_ring_generic_pad_ib,
1959 .begin_use = vcn_v1_0_ring_begin_use,
1960 .end_use = vcn_v1_0_ring_end_use,
1961 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
1962 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
1963 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1964};
1965
1966static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
1967{
1968 adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
1969 DRM_INFO("VCN decode is enabled in VM mode\n");
1970}
1971
1972static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1973{
1974 int i;
1975
1976 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1977 adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
1978
1979 DRM_INFO("VCN encode is enabled in VM mode\n");
1980}
1981
1982static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
1983 .set = vcn_v1_0_set_interrupt_state,
1984 .process = vcn_v1_0_process_interrupt,
1985};
1986
1987static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
1988{
1989 adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
1990 adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
1991}
1992
1993const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
1994{
1995 .type = AMD_IP_BLOCK_TYPE_VCN,
1996 .major = 1,
1997 .minor = 0,
1998 .rev = 0,
1999 .funcs = &vcn_v1_0_ip_funcs,
2000};