/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2

#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f

#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000

#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
#define mmUVD_REG_XX_MASK 0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX 1

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state);

/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_jpeg_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.inst->ring_jpeg;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	ring->sched.ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->sched.ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->sched.ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}
	}

	ring = &adev->vcn.inst->ring_jpeg;
	ring->sched.ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->sched.ready = false;
		goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->sched.ready = false;
	}

	ring = &adev->vcn.inst->ring_jpeg;
	ring->sched.ready = false;

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

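/**
 * vcn_v2_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @indirect: write the registers through the DPG indirect SRAM when true
 *
 * Let the VCN memory controller know its offsets while in dynamic
 * power gating mode
 */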
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

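/**
 * vcn_v2_0_clock_gating_dpg_mode - program VCN clock gating for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @indirect: write the registers through the DPG indirect SRAM when true
 *
 * Program SW clock gating control and SUVD clock gating while in
 * dynamic power gating mode
 */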
static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * jpeg_v2_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
	uint32_t tmp;
	int r = 0;

	/* disable power gating */
	tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);

	SOC15_WAIT_ON_RREG(VCN, 0,
		mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);

	if (r) {
		DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
		return r;
	}

	/* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);

	/* JPEG disable CGC */
	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);

	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v2_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r = 0;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable JPEG CGC */
	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);

	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);

	/* enable power gating */
	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS));
	tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
	tmp |= 0x1; /* UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);

	tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
		(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);

	if (r) {
		DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
		return r;
	}

	return r;
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

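/**
 * vcn_v2_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 *
 * Power up the VCN tiles through UVD_PGFSM_CONFIG and update
 * UVD_POWER_STATUS accordingly
 */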
static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

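/**
 * vcn_v2_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 *
 * Power down the VCN tiles through UVD_PGFSM_CONFIG and wait for the
 * power status to confirm the tiles are off
 */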
static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret);
	}
}

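/**
 * vcn_v2_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @indirect: program the registers through the DPG indirect SRAM when true
 *
 * Setup and start the VCN block in dynamic power gating mode
 */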
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.dpg_sram_curr_addr = (uint32_t *)adev->vcn.dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}

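/**
 * vcn_v2_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */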
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
		if (r)
			return r;
		goto jpeg;
	}

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

jpeg:
	r = jpeg_v2_0_start(adev);

	return r;
}

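/**
 * vcn_v2_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the rings to drain and disable dynamic power gating mode
 */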
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

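/**
 * vcn_v2_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block
 */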
static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	r = jpeg_v2_0_stop(adev);
	if (r)
		return r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

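/**
 * vcn_v2_0_pause_dpg_mode - pause/unpause dpg mode
 *
 * @adev: amdgpu_device pointer
 * @new_state: new dpg pause state
 *
 * Pause or unpause dynamic power gating when the requested state
 * differs from the current one, restoring the encode ring registers
 * on pause
 */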
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

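/**
 * vcn_v2_0_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether UVD_STATUS reports idle
 */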
static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

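/**
 * vcn_v2_0_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for UVD_STATUS to report idle
 */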
static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}

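/**
 * vcn_v2_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Enable or disable VCN block clock gating
 */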
static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP dwords to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job that owns the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

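/**
 * vcn_v2_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask to apply before comparing
 *
 * Write commands that make the engine wait until the masked register
 * matches the expected value
 */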
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

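/**
 * vcn_v2_0_dec_ring_emit_vm_flush - emit a VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for the given vmid and wait for the page table
 * base register write to land
 */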
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

1633void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1634 uint32_t reg, uint32_t val)
1635{
1636 struct amdgpu_device *adev = ring->adev;
1637
1638 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1639 amdgpu_ring_write(ring, reg << 2);
1640
1641 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1642 amdgpu_ring_write(ring, val);
1643
1644 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1645
1646 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
1647}

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
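
/*
 * Illustrative sketch (not part of the driver): both branches above follow
 * one pattern -- mirror wptr into the writeback slot and ring the doorbell,
 * or fall back to a direct MMIO write; only the fallback register differs
 * per ring. A hypothetical refactoring (wptr_reg would be the full SOC15
 * register offset):
 */
#if 0
static void vcn_v2_0_enc_ring_commit_wptr(struct amdgpu_ring *ring,
					  uint32_t wptr_reg)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32(wptr_reg, lower_32_bits(ring->wptr));
	}
}
#endif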

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence and trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				  u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer.
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
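
/*
 * Note (illustrative, not part of the driver): unlike the decode ring's
 * register mailbox, the encode ring takes a fixed five-dword packet, which
 * is why .emit_ib_size in the enc ring funcs below is 5. Sketch of the
 * layout:
 */
#if 0
struct vcn_v2_0_enc_ib_pkt {
	uint32_t cmd;		/* VCN_ENC_CMD_IB */
	uint32_t vmid;		/* VM ID the IB executes under */
	uint32_t ib_addr_lo;	/* lower 32 bits of the IB GPU address */
	uint32_t ib_addr_hi;	/* upper 32 bits of the IB GPU address */
	uint32_t ib_len_dw;	/* IB length in dwords */
};
#endif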

void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

/**
 * vcn_v2_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}

/**
 * vcn_v2_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}

/**
 * vcn_v2_0_jpeg_ring_emit_fence - emit a fence and trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

/**
 * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t val, uint32_t mask)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}
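
/*
 * Illustrative sketch (not part of the driver):
 * vcn_v2_0_jpeg_ring_emit_reg_wait() and vcn_v2_0_jpeg_ring_emit_wreg()
 * above share the same addressing split -- register byte offsets inside
 * the 0x10000..0x105ff window are encoded directly in the PACKETJ header,
 * anything else is routed through the external register write address.
 * A hypothetical helper for that branch:
 */
#if 0
static void vcn_v2_0_jpeg_ring_emit_reg_addr(struct amdgpu_ring *ring,
					     uint32_t reg_offset,
					     uint32_t pkt_type)
{
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, pkt_type));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, pkt_type));
	}
}
#endif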

void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
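
/*
 * Usage sketch (not part of the driver): the test above follows the usual
 * scratch-register handshake -- seed a scratch register with 0xCAFEDEAD,
 * ask the ring to write 0xDEADBEEF into it, and poll until it lands.
 * Callers typically go through amdgpu_ring_test_helper(), e.g.:
 */
#if 0
static int vcn_v2_0_smoke_test(struct amdgpu_device *adev)
{
	/* wraps ->test_ring and updates ring->sched.ready based on the result */
	return amdgpu_ring_test_helper(&adev->vcn.inst->ring_dec);
}
#endif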

static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}
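
/*
 * Usage sketch (not part of the driver): other parts of the driver gate or
 * ungate VCN through the generic IP-block interface rather than calling
 * this directly, e.g.:
 */
#if 0
static void vcn_v2_0_gate_example(struct amdgpu_device *adev)
{
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_GATE);
}
#endif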

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
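
/*
 * Note: the interrupt "types" registered above map onto the IH sources the
 * block uses -- one for the decode ring, one per encode ring, and one for
 * the JPEG ring, hence num_enc_rings + 2.
 */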

const struct amdgpu_ip_block_version vcn_v2_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);

/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	if (amdgpu_sriov_vf(adev))
		adev->vcn.num_enc_rings = 1;
	else
		adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
	return 0;
}
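
/*
 * Illustrative sketch (not part of the driver): with the assignments above,
 * the VCN doorbell block starting at base = (vcn_ring0_1 << 1) holds the
 * decode ring at base + 0, encode ring i at base + 2 + i on bare metal,
 * and at base + 1 + i under SR-IOV. A hypothetical helper mirroring that
 * logic:
 */
#if 0
static u32 vcn_v2_0_enc_doorbell_index(struct amdgpu_device *adev, int i)
{
	u32 base = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	return amdgpu_sriov_vf(adev) ? base + 1 + i : base + 2 + i;
}
#endif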

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;

	fw_shared->present_flag_0 = 0;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	/* Disable vcn decode for SR-IOV */
	if (amdgpu_sriov_vf(adev))
		ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
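
/*
 * Note (illustrative): the windows programmed above carve the VCN BO into
 * consecutive regions, with offset = 0 when the PSP already placed the
 * firmware in the TMR and offset = size otherwise:
 *   cache window 0: firmware  -> [BO + 0, BO + size) (or the PSP TMR)
 *   cache window 1: stack     -> [BO + offset, + AMDGPU_VCN_STACK_SIZE)
 *   cache window 2: context   -> [BO + offset + stack, + AMDGPU_VCN_CONTEXT_SIZE)
 * plus a separate non-cached window for the shared firmware structure.
 */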

static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
					   uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
	}
}

static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	return 0;
}
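
/*
 * Illustrative sketch (not part of the driver): in indirect mode the
 * WREG32_SOC15_DPG_MODE() calls above are assumed not to touch MMIO at all;
 * each one appends an (offset, value) pair to the DPG SRAM image that
 * dpg_sram_curr_addr walks, and psp_update_vcn_sram() then hands the whole
 * image to the PSP to program. Conceptually:
 */
#if 0
static void vcn_v2_0_dpg_sram_append(struct amdgpu_device *adev,
				     uint32_t reg_offset, uint32_t val)
{
	/* assumption: pairs are laid out back to back in the SRAM image */
	*adev->vcn.inst->dpg_sram_curr_addr++ = reg_offset;
	*adev->vcn.inst->dpg_sram_curr_addr++ = val;
}
#endif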
924
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	return 0;
}

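/*
 * Both start paths encode the ring size into RB_BUFSZ as a
 * power-of-two exponent rather than a byte count. order_base_2()
 * (from <linux/log2.h>) rounds up to the nearest exponent, so,
 * illustratively:
 *
 *	order_base_2(4096)  == 12	// 4 KiB ring  -> RB_BUFSZ = 12
 *	order_base_2(65536) == 16	// 64 KiB ring -> RB_BUFSZ = 16
 *
 * The RB_NO_FETCH/RB_NO_UPDATE bits set alongside it park the ring
 * buffer controller while the base address and pointers are
 * (re)programmed.
 */
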
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

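/*
 * All of the waits above go through SOC15_WAIT_ON_RREG-style polling.
 * Conceptually the contract is (a simplified sketch, not the macro's
 * exact expansion):
 *
 *	int ret = 0;
 *	uint32_t timeout, v;
 *	for (timeout = adev->usec_timeout; timeout; --timeout) {
 *		v = RREG32_SOC15(UVD, 0, reg);
 *		if ((v & mask) == (expected & mask))
 *			break;
 *		udelay(1);
 *	}
 *	if (!timeout)
 *		ret = -ETIMEDOUT;
 *
 * so a caller passing mask 0xFFFFFFFF, as the ring-pointer waits above
 * do, demands an exact register match.
 */
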
static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

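/*
 * Callers drive the pause handler through the adev->vcn.pause_dpg_mode
 * hook rather than calling it directly. A minimal usage sketch
 * (hypothetical caller; field and enum names as used above):
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE,
 *	};
 *
 *	adev->vcn.pause_dpg_mode(adev, 0, &new_state);
 *	// ... touch the rings while firmware DPG is paused ...
 *	new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 *	adev->vcn.pause_dpg_mode(adev, 0, &new_state);
 */
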
static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

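/*
 * In DPG mode the decode wptr is additionally mirrored into
 * UVD_SCRATCH2 with bit 31 set as a "valid" flag; the pause handler
 * above restores the ring wptr from that mirror with the flag masked
 * off:
 *
 *	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 *		     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 */
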
/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

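/*
 * Decode NOPs are emitted as two-dword PACKET0 pairs (a register write
 * plus its payload dword), which is why both the current wptr and the
 * requested count must be even. Illustratively:
 *
 *	vcn_v2_0_dec_ring_insert_nop(ring, 4);	// OK: emits two NOP pairs
 *	vcn_v2_0_dec_ring_insert_nop(ring, 3);	// trips WARN_ON(count % 2)
 */
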
/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				  unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

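/*
 * The fence sequence above lands in the ring as a fixed command
 * stream; annotated pair-by-pair it is:
 *
 *	PACKET0(context_id), seq		// fence sequence number
 *	PACKET0(data0), lower_32_bits(addr)	// fence address, low
 *	PACKET0(data1), upper_32_bits(addr) & 0xff
 *	PACKET0(cmd),   VCN_DEC_CMD_FENCE	// write seq to addr
 *	PACKET0(data0), 0
 *	PACKET0(data1), 0
 *	PACKET0(cmd),   VCN_DEC_CMD_TRAP	// then raise the trap irq
 *
 * i.e. the 14 dwords accounted per fence in .emit_frame_size below.
 */
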
/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

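/*
 * Likewise the IB submission is four register/value pairs - vmid, IB
 * base low/high, and IB size in dwords - matching the
 * .emit_ib_size = 8 entry in the ring funcs below. Sketch:
 *
 *	PACKET0(ib_vmid),     vmid
 *	PACKET0(ib_bar_low),  lower_32_bits(ib->gpu_addr)
 *	PACKET0(ib_bar_high), upper_32_bits(ib->gpu_addr)
 *	PACKET0(ib_size),     ib->length_dw
 */
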
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}

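/*
 * emit_wreg and emit_reg_wait are also the building blocks behind
 * amdgpu_ring_emit_reg_write_reg_wait_helper, which the ring funcs
 * below install for .emit_reg_write_reg_wait. Conceptually that
 * helper is just (a sketch of the generic fallback):
 *
 *	amdgpu_ring_emit_wreg(ring, reg0, ref);
 *	amdgpu_ring_emit_reg_wait(ring, reg1, ref, mask);
 */
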
/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the enc ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				  u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

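/*
 * Unlike the decode ring, the encode ring takes bare command dwords
 * rather than PACKET0 register writes. A register write/wait on the
 * enc ring is therefore a short tuple; sketching what the two helpers
 * above emit:
 *
 *	VCN_ENC_CMD_REG_WRITE, reg << 2, val
 *	VCN_ENC_CMD_REG_WAIT,  reg << 2, mask, val
 */
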
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

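/*
 * Ring tests are normally driven through amdgpu_ring_test_helper(),
 * which wraps the ->test_ring hook and marks the ring's scheduler
 * unusable on failure. A minimal usage sketch:
 *
 *	r = amdgpu_ring_test_helper(&adev->vcn.inst->ring_dec);
 *	if (r)
 *		return r;	// ring->sched.ready was cleared
 */
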
static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v2_0_init_header *header;
	uint32_t size;
	int i;

	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
	size = header->header_size + header->vcn_table_size;

	/* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	adev->vcn.inst->ring_dec.wptr = 0;
	adev->vcn.inst->ring_dec.wptr_old = 0;
	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		adev->vcn.inst->ring_enc[i].wptr = 0;
		adev->vcn.inst->ring_enc[i].wptr_old = 0;
		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
	}

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n",
			  data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
	int r;
	uint32_t tmp;
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	struct mmsch_v2_0_init_header *header;
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint8_t i = 0;

	header = (struct mmsch_v2_0_init_header *)init_table;
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

		header->vcn_table_offset = header->header_size;

		init_table += header->vcn_table_offset;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			0xFFFFFFFF, 0x00000004);

		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			tmp = AMDGPU_UCODE_ID_VCN;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[tmp].tmr_mc_addr_lo);
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[tmp].tmr_mc_addr_hi);
			offset = 0;
		} else {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst->gpu_addr));
			offset = size;
		}

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
			ring = &adev->vcn.inst->ring_enc[r];
			ring->wptr = 0;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
				lower_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
				upper_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst->ring_dec;
		ring->wptr = 0;
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		tmp = sizeof(struct mmsch_v2_0_cmd_end);
		memcpy((void *)init_table, &end, tmp);
		table_size += (tmp / 4);
		header->vcn_table_size = table_size;
	}
	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

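/*
 * Each MMSCH_V2_0_INSERT_DIRECT_WT() above appends one direct-write
 * command to the init table consumed by the MMSCH firmware. Assuming
 * the layout defined in mmsch_v2_0.h, an entry is essentially:
 *
 *	struct mmsch_v2_0_cmd_direct_write {
 *		struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
 *		uint32_t reg_value;
 *	};
 *
 * with the target register offset carried in the header. The finished
 * table is then handed to the firmware via the mailbox handshake in
 * vcn_v2_0_start_mmsch(): ctx addr/size are programmed, host writes
 * 0x10000001, and the driver polls the resp register for 0x10000002.
 */
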
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};