/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);

/**
 * vcn_v2_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		adev->vcn.num_enc_rings = 1;
	else
		adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(adev->vcn.inst);

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		fw_shared->present_flag_0 = 0;
		drm_dev_exit(idx);
	}

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	/* Disable vcn decode for sriov */
	if (amdgpu_sriov_vf(adev))
		ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

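/**
 * vcn_v2_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @indirect: when set, stage the writes in the DPG SRAM image instead of
 * programming the registers directly
 *
 * Program the same cache and non-cache windows as vcn_v2_0_mc_resume(),
 * but through the DPG register interface.
 */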
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		  | UVD_CGC_GATE__UDEC_MASK
		  | UVD_CGC_GATE__MPEG2_MASK
		  | UVD_CGC_GATE__REGS_MASK
		  | UVD_CGC_GATE__RBC_MASK
		  | UVD_CGC_GATE__LMI_MC_MASK
		  | UVD_CGC_GATE__LMI_UMC_MASK
		  | UVD_CGC_GATE__IDCT_MASK
		  | UVD_CGC_GATE__MPRD_MASK
		  | UVD_CGC_GATE__MPC_MASK
		  | UVD_CGC_GATE__LBSI_MASK
		  | UVD_CGC_GATE__LRBBM_MASK
		  | UVD_CGC_GATE__UDEC_RE_MASK
		  | UVD_CGC_GATE__UDEC_CM_MASK
		  | UVD_CGC_GATE__UDEC_IT_MASK
		  | UVD_CGC_GATE__UDEC_DB_MASK
		  | UVD_CGC_GATE__UDEC_MP_MASK
		  | UVD_CGC_GATE__WCB_MASK
		  | UVD_CGC_GATE__VCPU_MASK
		  | UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		  | UVD_CGC_CTRL__SYS_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MODE_MASK
		  | UVD_CGC_CTRL__MPEG2_MODE_MASK
		  | UVD_CGC_CTRL__REGS_MODE_MASK
		  | UVD_CGC_CTRL__RBC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		  | UVD_CGC_CTRL__IDCT_MODE_MASK
		  | UVD_CGC_CTRL__MPRD_MODE_MASK
		  | UVD_CGC_CTRL__MPC_MODE_MASK
		  | UVD_CGC_CTRL__LBSI_MODE_MASK
		  | UVD_CGC_CTRL__LRBBM_MODE_MASK
		  | UVD_CGC_CTRL__WCB_MODE_MASK
		  | UVD_CGC_CTRL__VCPU_MODE_MASK
		  | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		 | UVD_SUVD_CGC_GATE__SIT_MASK
		 | UVD_SUVD_CGC_GATE__SMP_MASK
		 | UVD_SUVD_CGC_GATE__SCM_MASK
		 | UVD_SUVD_CGC_GATE__SDB_MASK
		 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
		 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
		 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
		 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCLR_MASK
		 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
		 | UVD_SUVD_CGC_GATE__ENT_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		 | UVD_SUVD_CGC_GATE__SITE_MASK
		 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

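/**
 * vcn_v2_0_clock_gating_dpg_mode - clock gating setup via the DPG path
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: DPG SRAM bank selector passed through to the register writes
 * @indirect: when set, stage the writes in the DPG SRAM image
 *
 * Mirror the software clock gating setup (UVD_CGC_CTRL, UVD_CGC_GATE and
 * the SUVD gating registers) using WREG32_SOC15_DPG_MODE.
 */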
static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
					   uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		      UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK |
		      UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		 | UVD_CGC_CTRL__SYS_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MODE_MASK
		 | UVD_CGC_CTRL__MPEG2_MODE_MASK
		 | UVD_CGC_CTRL__REGS_MODE_MASK
		 | UVD_CGC_CTRL__RBC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		 | UVD_CGC_CTRL__IDCT_MODE_MASK
		 | UVD_CGC_CTRL__MPRD_MODE_MASK
		 | UVD_CGC_CTRL__MPC_MODE_MASK
		 | UVD_CGC_CTRL__LBSI_MODE_MASK
		 | UVD_CGC_CTRL__LRBBM_MODE_MASK
		 | UVD_CGC_CTRL__WCB_MODE_MASK
		 | UVD_CGC_CTRL__VCPU_MODE_MASK
		 | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

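/**
 * vcn_v2_0_disable_static_power_gating - power the UVD islands back on
 *
 * @adev: amdgpu_device pointer
 *
 * Request power-up for all UVD power islands through UVD_PGFSM_CONFIG,
 * poll UVD_PGFSM_STATUS for completion, and update UVD_POWER_STATUS to
 * reflect the powered-on state.
 */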
static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
				   UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

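/**
 * vcn_v2_0_enable_static_power_gating - power gate the UVD islands
 *
 * @adev: amdgpu_device pointer
 *
 * When VCN power gating is supported, flag the tiles-off state in
 * UVD_POWER_STATUS, request power-down for all UVD power islands through
 * UVD_PGFSM_CONFIG, and wait for UVD_PGFSM_STATUS to acknowledge.
 */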
static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
	}
}

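/**
 * vcn_v2_0_start_dpg_mode - start VCN with dynamic power gating
 *
 * @adev: amdgpu_device pointer
 * @indirect: when set, stage the setup in the DPG SRAM image and hand it
 * to the PSP instead of programming the registers directly
 *
 * Enable UVD_PG_MODE, program clock gating, the VCPU, LMI and MPC blocks
 * and the memory controller windows, then bring up the decode ring with
 * DPG stalled around the read/write pointer reset.
 */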
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
	       UVD_LMI_CTRL__REQ_MODE_MASK |
	       UVD_LMI_CTRL__CRC_RESET_MASK |
	       UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
	       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
	       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
	       (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
	       0x00100000L);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, 0, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	return 0;
}

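/**
 * vcn_v2_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Defer to vcn_v2_0_start_dpg_mode() when DPG is supported; otherwise
 * power the block up, disable clock gating, program the LMI, MPC and
 * memory controller, boot the VCPU (retrying with a soft reset if it
 * does not report ready), and set up the decode and encode ring buffers.
 */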
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	return 0;
}

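/**
 * vcn_v2_0_stop_dpg_mode - stop VCN running in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 *
 * Unpause DPG, wait for the power status and for the ring read pointers
 * to catch up with the write pointers, then clear UVD_PG_MODE.
 */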
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v2_0_pause_dpg_mode(adev, 0, &state);
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

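/**
 * vcn_v2_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * In DPG mode defer to vcn_v2_0_stop_dpg_mode(); otherwise wait for the
 * engine and LMI to drain, stall the UMC channel, reset the LMI and VCPU,
 * clear the status register and re-enable clock and static power gating.
 * Finally drop the UVD DPM reference when power management is enabled.
 */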
static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

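/**
 * vcn_v2_0_pause_dpg_mode - pause/unpause the firmware-based DPG state
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @new_state: requested pause state
 *
 * On a pause request, set the NJ pause bit in UVD_DPG_PAUSE, wait for the
 * acknowledge, and restore the encode rings and decode write pointer with
 * DPG stalled; unpausing just clears the request bit. The resulting state
 * is cached in pause_state.fw_based.
 */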
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			   (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
						      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			     lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				  unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

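/**
 * vcn_v2_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: value to wait for
 * @mask: mask to apply to the register value
 *
 * Emit a REG_READ_COND_WAIT command so the engine waits until
 * (reg & mask) == val.
 */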
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

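/**
 * vcn_v2_0_dec_ring_emit_vm_flush - emit a VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid, then wait for the page table base address
 * register write to land before continuing.
 */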
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

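/**
 * vcn_v2_0_dec_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a WRITE_REG command on the decode ring.
 */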
1526void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1527 uint32_t reg, uint32_t val)
1528{
1529 struct amdgpu_device *adev = ring->adev;
1530
1531 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
1532 amdgpu_ring_write(ring, reg << 2);
1533
1534 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
1535 amdgpu_ring_write(ring, val);
1536
1537 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
1538
1539 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
1540}
1541
1542/**
1543 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
1544 *
1545 * @ring: amdgpu_ring pointer
1546 *
1547 * Returns the current hardware enc read pointer
1548 */
1549static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1550{
1551 struct amdgpu_device *adev = ring->adev;
1552
1553 if (ring == &adev->vcn.inst->ring_enc[0])
1554 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1555 else
1556 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1557}
1558
1559 /**
1560 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
1561 *
1562 * @ring: amdgpu_ring pointer
1563 *
1564 * Returns the current hardware enc write pointer
1565 */
1566static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1567{
1568 struct amdgpu_device *adev = ring->adev;
1569
1570 if (ring == &adev->vcn.inst->ring_enc[0]) {
1571 if (ring->use_doorbell)
1572 return *ring->wptr_cpu_addr;
1573 else
1574 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1575 } else {
1576 if (ring->use_doorbell)
1577 return *ring->wptr_cpu_addr;
1578 else
1579 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1580 }
1581}
1582
1583 /**
1584 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
1585 *
1586 * @ring: amdgpu_ring pointer
1587 *
1588 * Commits the enc write pointer to the hardware
1589 */
1590static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1591{
1592 struct amdgpu_device *adev = ring->adev;
1593
1594 if (ring == &adev->vcn.inst->ring_enc[0]) {
1595 if (ring->use_doorbell) {
1596 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1597 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1598 } else {
1599 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1600 }
1601 } else {
1602 if (ring->use_doorbell) {
1603 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1604 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1605 } else {
1606 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1607 }
1608 }
1609}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the enc ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

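/**
 * vcn_v2_0_enc_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Emit an END command to mark the end of the current submission.
 */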
void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

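/**
 * vcn_v2_0_enc_ring_emit_reg_wait - emit an enc register wait
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword granularity)
 * @val: value to wait for
 * @mask: bits to compare
 *
 * Emit a REG_WAIT command so the engine polls @reg until the value,
 * masked with @mask, equals @val.
 */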
void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

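/**
 * vcn_v2_0_enc_ring_emit_vm_flush - emit an enc VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the TLB for the given VMID, then wait until the page table base
 * register for @vmid has actually been written.
 */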
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

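/**
 * vcn_v2_0_enc_ring_emit_wreg - emit a register write on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword granularity)
 * @val: value to write
 */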
void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

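/* Nothing to program for VCN 2.0; the irq .set callback is a no-op. */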
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

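/**
 * vcn_v2_0_process_interrupt - route a VCN interrupt to its fence ring
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source (unused)
 * @entry: decoded interrupt vector entry
 *
 * Dispatch on the source ID and run fence processing for the dec ring
 * or one of the enc rings.
 */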
static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

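/**
 * vcn_v2_0_dec_ring_test_ring - basic dec ring liveness test
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a magic value to the scratch register through the ring and poll
 * until it reads back, or time out. Skipped under SR-IOV, where the dec
 * ring is not used.
 */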
int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

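/**
 * vcn_v2_0_start_mmsch - hand the init table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table to pass to the MMSCH firmware
 *
 * Point the MMSCH at the descriptor table, reset the ring pointers, then
 * kick off initialization through the mailbox and poll for completion.
 */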
static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v2_0_init_header *header;
	uint32_t size;
	int i;

	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
	size = header->header_size + header->vcn_table_size;

	/* 1, write the GPU mc addr of the memory descriptor location
	 * to mmsch_vf_ctx_addr_lo/hi
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	adev->vcn.inst->ring_dec.wptr = 0;
	adev->vcn.inst->ring_dec.wptr_old = 0;
	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		adev->vcn.inst->ring_enc[i].wptr = 0;
		adev->vcn.inst->ring_enc[i].wptr_old = 0;
		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
	}

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n",
			  data);
		return -EBUSY;
	}

	return 0;
}

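/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (MC addresses, cache windows, ring buffers)
 * out of direct-write commands and submit it via vcn_v2_0_start_mmsch(),
 * so the host MMSCH firmware programs the engine on the VF's behalf.
 */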
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
	int r;
	uint32_t tmp;
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	struct mmsch_v2_0_init_header *header;
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint8_t i = 0;

	header = (struct mmsch_v2_0_init_header *)init_table;
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

		header->vcn_table_offset = header->header_size;

		init_table += header->vcn_table_offset;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);

		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			0xFFFFFFFF, 0x00000004);

		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
			offset = 0;
		} else {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst->gpu_addr));
			offset = size;
		}

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
			ring = &adev->vcn.inst->ring_enc[r];
			ring->wptr = 0;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
				lower_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
				upper_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst->ring_dec;
		ring->wptr = 0;
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		tmp = sizeof(struct mmsch_v2_0_cmd_end);
		memcpy((void *)init_table, &end, tmp);
		table_size += (tmp / 4);
		header->vcn_table_size = table_size;
	}
	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};
1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25
26#include "amdgpu.h"
27#include "amdgpu_vcn.h"
28#include "soc15.h"
29#include "soc15d.h"
30#include "amdgpu_pm.h"
31#include "amdgpu_psp.h"
32#include "mmsch_v2_0.h"
33#include "vcn_v2_0.h"
34
35#include "vcn/vcn_2_0_0_offset.h"
36#include "vcn/vcn_2_0_0_sh_mask.h"
37#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
38
39#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
40#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
41#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x504
42#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x505
43#define mmUVD_NO_OP_INTERNAL_OFFSET 0x53f
44#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x54a
45#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d
46
47#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x1e1
48#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x5a6
49#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
50#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
51
52static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
53static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
54static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
55static int vcn_v2_0_set_powergating_state(void *handle,
56 enum amd_powergating_state state);
57static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
58 int inst_idx, struct dpg_pause_state *new_state);
59static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
60/**
61 * vcn_v2_0_early_init - set function pointers
62 *
63 * @handle: amdgpu_device pointer
64 *
65 * Set ring and irq function pointers
66 */
67static int vcn_v2_0_early_init(void *handle)
68{
69 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
70
71 adev->vcn.num_vcn_inst = 1;
72 if (amdgpu_sriov_vf(adev))
73 adev->vcn.num_enc_rings = 1;
74 else
75 adev->vcn.num_enc_rings = 2;
76
77 vcn_v2_0_set_dec_ring_funcs(adev);
78 vcn_v2_0_set_enc_ring_funcs(adev);
79 vcn_v2_0_set_irq_funcs(adev);
80
81 return 0;
82}
83
84/**
85 * vcn_v2_0_sw_init - sw init for VCN block
86 *
87 * @handle: amdgpu_device pointer
88 *
89 * Load firmware and sw initialization
90 */
91static int vcn_v2_0_sw_init(void *handle)
92{
93 struct amdgpu_ring *ring;
94 int i, r;
95 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
96 volatile struct amdgpu_fw_shared *fw_shared;
97
98 /* VCN DEC TRAP */
99 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
100 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
101 &adev->vcn.inst->irq);
102 if (r)
103 return r;
104
105 /* VCN ENC TRAP */
106 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
107 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
108 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
109 &adev->vcn.inst->irq);
110 if (r)
111 return r;
112 }
113
114 r = amdgpu_vcn_sw_init(adev);
115 if (r)
116 return r;
117
118 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
119 const struct common_firmware_header *hdr;
120 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
121 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
122 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
123 adev->firmware.fw_size +=
124 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
125 DRM_INFO("PSP loading VCN firmware\n");
126 }
127
128 r = amdgpu_vcn_resume(adev);
129 if (r)
130 return r;
131
132 ring = &adev->vcn.inst->ring_dec;
133
134 ring->use_doorbell = true;
135 ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
136
137 sprintf(ring->name, "vcn_dec");
138 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
139 AMDGPU_RING_PRIO_DEFAULT);
140 if (r)
141 return r;
142
143 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
144 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
145 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
146 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
147 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
148 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
149
150 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
151 adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
152 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
153 adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
154 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
155 adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
156 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
157 adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
158 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
159 adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
160
161 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
162 ring = &adev->vcn.inst->ring_enc[i];
163 ring->use_doorbell = true;
164 if (!amdgpu_sriov_vf(adev))
165 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
166 else
167 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
168 sprintf(ring->name, "vcn_enc%d", i);
169 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
170 AMDGPU_RING_PRIO_DEFAULT);
171 if (r)
172 return r;
173 }
174
175 adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
176
177 r = amdgpu_virt_alloc_mm_table(adev);
178 if (r)
179 return r;
180
181 fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
182 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
183 return 0;
184}
185
186/**
187 * vcn_v2_0_sw_fini - sw fini for VCN block
188 *
189 * @handle: amdgpu_device pointer
190 *
191 * VCN suspend and free up sw allocation
192 */
193static int vcn_v2_0_sw_fini(void *handle)
194{
195 int r;
196 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
197 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
198
199 fw_shared->present_flag_0 = 0;
200
201 amdgpu_virt_free_mm_table(adev);
202
203 r = amdgpu_vcn_suspend(adev);
204 if (r)
205 return r;
206
207 r = amdgpu_vcn_sw_fini(adev);
208
209 return r;
210}
211
212/**
213 * vcn_v2_0_hw_init - start and test VCN block
214 *
215 * @handle: amdgpu_device pointer
216 *
217 * Initialize the hardware, boot up the VCPU and do some testing
218 */
219static int vcn_v2_0_hw_init(void *handle)
220{
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
223 int i, r;
224
225 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
226 ring->doorbell_index, 0);
227
228 if (amdgpu_sriov_vf(adev))
229 vcn_v2_0_start_sriov(adev);
230
231 r = amdgpu_ring_test_helper(ring);
232 if (r)
233 goto done;
234
235 //Disable vcn decode for sriov
236 if (amdgpu_sriov_vf(adev))
237 ring->sched.ready = false;
238
239 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
240 ring = &adev->vcn.inst->ring_enc[i];
241 r = amdgpu_ring_test_helper(ring);
242 if (r)
243 goto done;
244 }
245
246done:
247 if (!r)
248 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
249 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
250
251 return r;
252}
253
254/**
255 * vcn_v2_0_hw_fini - stop the hardware block
256 *
257 * @handle: amdgpu_device pointer
258 *
259 * Stop the VCN block, mark ring as not ready any more
260 */
261static int vcn_v2_0_hw_fini(void *handle)
262{
263 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
264
265 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
266 (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
267 RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
268 vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
269
270 return 0;
271}
272
273/**
274 * vcn_v2_0_suspend - suspend VCN block
275 *
276 * @handle: amdgpu_device pointer
277 *
278 * HW fini and suspend VCN block
279 */
280static int vcn_v2_0_suspend(void *handle)
281{
282 int r;
283 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
284
285 r = vcn_v2_0_hw_fini(adev);
286 if (r)
287 return r;
288
289 r = amdgpu_vcn_suspend(adev);
290
291 return r;
292}
293
294/**
295 * vcn_v2_0_resume - resume VCN block
296 *
297 * @handle: amdgpu_device pointer
298 *
299 * Resume firmware and hw init VCN block
300 */
301static int vcn_v2_0_resume(void *handle)
302{
303 int r;
304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
305
306 r = amdgpu_vcn_resume(adev);
307 if (r)
308 return r;
309
310 r = vcn_v2_0_hw_init(adev);
311
312 return r;
313}
314
315/**
316 * vcn_v2_0_mc_resume - memory controller programming
317 *
318 * @adev: amdgpu_device pointer
319 *
320 * Let the VCN memory controller know it's offsets
321 */
322static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
323{
324 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
325 uint32_t offset;
326
327 if (amdgpu_sriov_vf(adev))
328 return;
329
330 /* cache window 0: fw */
331 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
332 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
333 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
334 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
335 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
336 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
337 offset = 0;
338 } else {
339 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
340 lower_32_bits(adev->vcn.inst->gpu_addr));
341 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
342 upper_32_bits(adev->vcn.inst->gpu_addr));
343 offset = size;
344 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
345 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
346 }
347
348 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
349
350 /* cache window 1: stack */
351 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
352 lower_32_bits(adev->vcn.inst->gpu_addr + offset));
353 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
354 upper_32_bits(adev->vcn.inst->gpu_addr + offset));
355 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
356 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
357
358 /* cache window 2: context */
359 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
360 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
361 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
362 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
363 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
364 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
365
366 /* non-cache window */
367 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
368 lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
369 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
370 upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
371 WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
372 WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
373 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
374
375 WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
376}
377
378static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
379{
380 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
381 uint32_t offset;
382
383 /* cache window 0: fw */
384 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
385 if (!indirect) {
386 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
387 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
388 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
389 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
390 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
391 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
392 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
393 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
394 } else {
395 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
396 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
397 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
398 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
399 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
400 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
401 }
402 offset = 0;
403 } else {
404 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
405 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
406 lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
407 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
408 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
409 upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
410 offset = size;
411 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
412 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
413 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
414 }
415
416 if (!indirect)
417 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
418 UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
419 else
420 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
421 UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
422
423 /* cache window 1: stack */
424 if (!indirect) {
425 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
426 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
427 lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
428 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
429 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
430 upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
431 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
432 UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
433 } else {
434 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
435 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
436 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
437 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
438 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
439 UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
440 }
441 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
442 UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
443
444 /* cache window 2: context */
445 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
446 UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
447 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
448 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
449 UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
450 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
451 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
452 UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
453 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
454 UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
455
456 /* non-cache window */
457 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
458 UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
459 lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
460 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
461 UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
462 upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
463 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
464 UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
465 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
466 UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
467 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
468
469 /* VCN global tiling registers */
470 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
471 UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
472}
473
474/**
475 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
476 *
477 * @adev: amdgpu_device pointer
478 * @sw: enable SW clock gating
479 *
480 * Disable clock gating for VCN block
481 */
482static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
483{
484 uint32_t data;
485
486 if (amdgpu_sriov_vf(adev))
487 return;
488
489 /* UVD disable CGC */
490 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
491 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
492 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
493 else
494 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
495 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
496 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
497 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
498
499 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
500 data &= ~(UVD_CGC_GATE__SYS_MASK
501 | UVD_CGC_GATE__UDEC_MASK
502 | UVD_CGC_GATE__MPEG2_MASK
503 | UVD_CGC_GATE__REGS_MASK
504 | UVD_CGC_GATE__RBC_MASK
505 | UVD_CGC_GATE__LMI_MC_MASK
506 | UVD_CGC_GATE__LMI_UMC_MASK
507 | UVD_CGC_GATE__IDCT_MASK
508 | UVD_CGC_GATE__MPRD_MASK
509 | UVD_CGC_GATE__MPC_MASK
510 | UVD_CGC_GATE__LBSI_MASK
511 | UVD_CGC_GATE__LRBBM_MASK
512 | UVD_CGC_GATE__UDEC_RE_MASK
513 | UVD_CGC_GATE__UDEC_CM_MASK
514 | UVD_CGC_GATE__UDEC_IT_MASK
515 | UVD_CGC_GATE__UDEC_DB_MASK
516 | UVD_CGC_GATE__UDEC_MP_MASK
517 | UVD_CGC_GATE__WCB_MASK
518 | UVD_CGC_GATE__VCPU_MASK
519 | UVD_CGC_GATE__SCPU_MASK);
520 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
521
522 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
523 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
524 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
525 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
526 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
527 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
528 | UVD_CGC_CTRL__SYS_MODE_MASK
529 | UVD_CGC_CTRL__UDEC_MODE_MASK
530 | UVD_CGC_CTRL__MPEG2_MODE_MASK
531 | UVD_CGC_CTRL__REGS_MODE_MASK
532 | UVD_CGC_CTRL__RBC_MODE_MASK
533 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
534 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
535 | UVD_CGC_CTRL__IDCT_MODE_MASK
536 | UVD_CGC_CTRL__MPRD_MODE_MASK
537 | UVD_CGC_CTRL__MPC_MODE_MASK
538 | UVD_CGC_CTRL__LBSI_MODE_MASK
539 | UVD_CGC_CTRL__LRBBM_MODE_MASK
540 | UVD_CGC_CTRL__WCB_MODE_MASK
541 | UVD_CGC_CTRL__VCPU_MODE_MASK
542 | UVD_CGC_CTRL__SCPU_MODE_MASK);
543 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
544
545 /* turn on */
546 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
547 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
548 | UVD_SUVD_CGC_GATE__SIT_MASK
549 | UVD_SUVD_CGC_GATE__SMP_MASK
550 | UVD_SUVD_CGC_GATE__SCM_MASK
551 | UVD_SUVD_CGC_GATE__SDB_MASK
552 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
553 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
554 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
555 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
556 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
557 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
558 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
559 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
560 | UVD_SUVD_CGC_GATE__SCLR_MASK
561 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
562 | UVD_SUVD_CGC_GATE__ENT_MASK
563 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
564 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
565 | UVD_SUVD_CGC_GATE__SITE_MASK
566 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
567 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
568 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
569 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
570 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
571 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
572
573 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
574 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
575 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
576 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
577 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
578 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
579 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
580 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
581 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
582 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
583 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
584 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
585}
586
587static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
588 uint8_t sram_sel, uint8_t indirect)
589{
590 uint32_t reg_data = 0;
591
592 /* enable sw clock gating control */
593 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
594 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
595 else
596 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
597 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
598 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
599 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
600 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
601 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
602 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
603 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
604 UVD_CGC_CTRL__SYS_MODE_MASK |
605 UVD_CGC_CTRL__UDEC_MODE_MASK |
606 UVD_CGC_CTRL__MPEG2_MODE_MASK |
607 UVD_CGC_CTRL__REGS_MODE_MASK |
608 UVD_CGC_CTRL__RBC_MODE_MASK |
609 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
610 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
611 UVD_CGC_CTRL__IDCT_MODE_MASK |
612 UVD_CGC_CTRL__MPRD_MODE_MASK |
613 UVD_CGC_CTRL__MPC_MODE_MASK |
614 UVD_CGC_CTRL__LBSI_MODE_MASK |
615 UVD_CGC_CTRL__LRBBM_MODE_MASK |
616 UVD_CGC_CTRL__WCB_MODE_MASK |
617 UVD_CGC_CTRL__VCPU_MODE_MASK |
618 UVD_CGC_CTRL__SCPU_MODE_MASK);
619 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
620 UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
621
622 /* turn off clock gating */
623 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
624 UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
625
626 /* turn on SUVD clock gating */
627 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
628 UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
629
630 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
631 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
632 UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
633}
634
635/**
636 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
637 *
638 * @adev: amdgpu_device pointer
639 * @sw: enable SW clock gating
640 *
641 * Enable clock gating for VCN block
642 */
643static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
644{
645 uint32_t data = 0;
646
647 if (amdgpu_sriov_vf(adev))
648 return;
649
650 /* enable UVD CGC */
651 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
652 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
653 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
654 else
655 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
656 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
657 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
658 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
659
660 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
661 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
662 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
663 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
664 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
665 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
666 | UVD_CGC_CTRL__SYS_MODE_MASK
667 | UVD_CGC_CTRL__UDEC_MODE_MASK
668 | UVD_CGC_CTRL__MPEG2_MODE_MASK
669 | UVD_CGC_CTRL__REGS_MODE_MASK
670 | UVD_CGC_CTRL__RBC_MODE_MASK
671 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
672 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
673 | UVD_CGC_CTRL__IDCT_MODE_MASK
674 | UVD_CGC_CTRL__MPRD_MODE_MASK
675 | UVD_CGC_CTRL__MPC_MODE_MASK
676 | UVD_CGC_CTRL__LBSI_MODE_MASK
677 | UVD_CGC_CTRL__LRBBM_MODE_MASK
678 | UVD_CGC_CTRL__WCB_MODE_MASK
679 | UVD_CGC_CTRL__VCPU_MODE_MASK
680 | UVD_CGC_CTRL__SCPU_MODE_MASK);
681 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
682
683 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
684 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
685 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
686 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
687 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
688 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
689 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
690 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
691 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
692 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
693 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
694 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
695}
696
697static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
698{
699 uint32_t data = 0;
700
701 if (amdgpu_sriov_vf(adev))
702 return;
703
704 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
705 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
706 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
707 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
708 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
709 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
710 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
711 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
712 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
713 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
714 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
715
716 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
717 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
718 UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
719 } else {
720 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
721 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
722 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
723 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
724 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
725 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
726 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
727 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
728 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
729 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
730 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
731 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
732 }
733
734 /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
735 * UVDU_PWR_STATUS are 0 (power on) */
736
737 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
738 data &= ~0x103;
739 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
740 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
741 UVD_POWER_STATUS__UVD_PG_EN_MASK;
742
743 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
744}
745
746static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
747{
748 uint32_t data = 0;
749
750 if (amdgpu_sriov_vf(adev))
751 return;
752
753 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
754 /* Before power off, this indicator has to be turned on */
755 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
756 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
757 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
758 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
759
760
761 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
762 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
763 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
764 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
765 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
766 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
767 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
768 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
769 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
770 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
771
772 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
773
774 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
775 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
776 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
777 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
778 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
779 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
780 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
781 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
782 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
783 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
784 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
785 }
786}
787
788static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
789{
790 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
791 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
792 uint32_t rb_bufsz, tmp;
793
794 vcn_v2_0_enable_static_power_gating(adev);
795
796 /* enable dynamic power gating mode */
797 tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
798 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
799 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
800 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
801
802 if (indirect)
803 adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr;
804
805 /* enable clock gating */
806 vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
807
808 /* enable VCPU clock */
809 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
810 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
811 tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
812 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
813 UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
814
815 /* disable master interupt */
816 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
817 UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
818
819 /* setup mmUVD_LMI_CTRL */
820 tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
821 UVD_LMI_CTRL__REQ_MODE_MASK |
822 UVD_LMI_CTRL__CRC_RESET_MASK |
823 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
824 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
825 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
826 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
827 0x00100000L);
828 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
829 UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
830
831 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
832 UVD, 0, mmUVD_MPC_CNTL),
833 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
834
835 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
836 UVD, 0, mmUVD_MPC_SET_MUXA0),
837 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
838 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
839 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
840 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
841
842 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
843 UVD, 0, mmUVD_MPC_SET_MUXB0),
844 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
845 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
846 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
847 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
848
849 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
850 UVD, 0, mmUVD_MPC_SET_MUX),
851 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
852 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
853 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
854
855 vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
856
857 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
858 UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
859 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
860 UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
861
862 /* release VCPU reset to boot */
863 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
864 UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
865
866 /* enable LMI MC and UMC channels */
867 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
868 UVD, 0, mmUVD_LMI_CTRL2),
869 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
870
871 /* enable master interrupt */
872 WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
873 UVD, 0, mmUVD_MASTINT_EN),
874 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
875
876 if (indirect)
877 psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
878 (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
879 (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
880
881 /* force RBC into idle state */
882 rb_bufsz = order_base_2(ring->ring_size);
883 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
884 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
885 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
886 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
887 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
888 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
889
890 /* Stall DPG before WPTR/RPTR reset */
891 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
892 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
893 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
894 fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
895
896 /* set the write pointer delay */
897 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
898
899 /* set the wb address */
900 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
901 (upper_32_bits(ring->gpu_addr) >> 2));
902
903 /* programm the RB_BASE for ring buffer */
904 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
905 lower_32_bits(ring->gpu_addr));
906 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
907 upper_32_bits(ring->gpu_addr));
908
909 /* Initialize the ring buffer's read and write pointers */
910 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
911
912 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
913
914 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
915 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
916 lower_32_bits(ring->wptr));
917
918 fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
919 /* Unstall DPG */
920 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
921 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
922 return 0;
923}
924
925static int vcn_v2_0_start(struct amdgpu_device *adev)
926{
927 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
928 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
929 uint32_t rb_bufsz, tmp;
930 uint32_t lmi_swap_cntl;
931 int i, j, r;
932
933 if (adev->pm.dpm_enabled)
934 amdgpu_dpm_enable_uvd(adev, true);
935
936 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
937 return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
938
939 vcn_v2_0_disable_static_power_gating(adev);
940
941 /* set uvd status busy */
942 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
943 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
944
945 /*SW clock gating */
946 vcn_v2_0_disable_clock_gating(adev);
947
948 /* enable VCPU clock */
949 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
950 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
951
952 /* disable master interrupt */
953 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
954 ~UVD_MASTINT_EN__VCPU_EN_MASK);
955
956 /* setup mmUVD_LMI_CTRL */
957 tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
958 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
959 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
960 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
961 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
962 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
963
964 /* setup mmUVD_MPC_CNTL */
965 tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
966 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
967 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
968 WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);
969
970 /* setup UVD_MPC_SET_MUXA0 */
971 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
972 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
973 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
974 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
975 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
976
977 /* setup UVD_MPC_SET_MUXB0 */
978 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
979 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
980 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
981 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
982 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
983
984 /* setup mmUVD_MPC_SET_MUX */
985 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
986 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
987 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
988 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
989
990 vcn_v2_0_mc_resume(adev);
991
992 /* release VCPU reset to boot */
993 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
994 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
995
996 /* enable LMI MC and UMC channels */
997 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
998 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
999
1000 tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
1001 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1002 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1003 WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);
1004
1005 /* disable byte swapping */
1006 lmi_swap_cntl = 0;
1007#ifdef __BIG_ENDIAN
1008 /* swap (8 in 32) RB and IB */
1009 lmi_swap_cntl = 0xa;
1010#endif
1011 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
1012
1013 for (i = 0; i < 10; ++i) {
1014 uint32_t status;
1015
1016 for (j = 0; j < 100; ++j) {
1017 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
1018 if (status & 2)
1019 break;
1020 mdelay(10);
1021 }
1022 r = 0;
1023 if (status & 2)
1024 break;
1025
1026 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1027 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1028 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1029 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1030 mdelay(10);
1031 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
1032 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1033 mdelay(10);
1034 r = -1;
1035 }
1036
1037 if (r) {
1038 DRM_ERROR("VCN decode not responding, giving up!!!\n");
1039 return r;
1040 }
1041
1042 /* enable master interrupt */
1043 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
1044 UVD_MASTINT_EN__VCPU_EN_MASK,
1045 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1046
1047 /* clear the busy bit of VCN_STATUS */
1048 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
1049 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1050
1051 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);
1052
1053 /* force RBC into idle state */
1054 rb_bufsz = order_base_2(ring->ring_size);
1055 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1056 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1057 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1058 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1059 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1060 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1061
1062 fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
1063 /* programm the RB_BASE for ring buffer */
1064 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1065 lower_32_bits(ring->gpu_addr));
1066 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1067 upper_32_bits(ring->gpu_addr));
1068
1069 /* Initialize the ring buffer's read and write pointers */
1070 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1071
1072 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1073 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1074 lower_32_bits(ring->wptr));
1075 fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
1076
1077 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
1078 ring = &adev->vcn.inst->ring_enc[0];
1079 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1080 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1081 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1082 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1083 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1084 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
1085
1086 fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
1087 ring = &adev->vcn.inst->ring_enc[1];
1088 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1089 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1090 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1091 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1092 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1093 fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
1094
1095 return 0;
1096}
1097
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

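/**
 * vcn_v2_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the engine and memory interface to go idle, stall the UMC
 * channel, reset the LMI and VCPU, then re-enable clock and static power
 * gating before requesting power-off from the dpm code.
 */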
static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

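/**
 * vcn_v2_0_pause_dpg_mode - pause/unpause firmware-based DPG
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance index
 * @new_state: requested pause state
 *
 * Pause or unpause the firmware-based dynamic power gating state if it
 * differs from the cached one.  On pause, DPG power-up is stalled while
 * the encode rings and the decode write pointer are restored.
 */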
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

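/**
 * vcn_v2_0_is_idle - check VCN block's idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if UVD_STATUS reports the block as idle.
 */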
static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

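/**
 * vcn_v2_0_wait_for_idle - wait for VCN block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls UVD_STATUS until the block reports idle or the wait times out.
 */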
static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

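/**
 * vcn_v2_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Enable clock gating only when the block is idle; disabling it is
 * unconditional.  A no-op when running as an SR-IOV virtual function.
 */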
static int vcn_v2_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* the block must be idle before clock gating is enabled */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

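/**
 * vcn_v2_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: bits to compare
 *
 * Write commands that make the decode firmware poll @reg until
 * (value & @mask) == @val.
 */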
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

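/**
 * vcn_v2_0_dec_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid, then wait for the page table base
 * register write to land before continuing.
 */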
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

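/**
 * vcn_v2_0_dec_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Ask the decode firmware to write @val to @reg on the ring's behalf.
 */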
void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

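/**
 * vcn_v2_0_enc_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the enc ring.
 */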
void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

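/**
 * vcn_v2_0_enc_ring_emit_reg_wait - emit an enc register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: bits to compare
 *
 * Ask the enc firmware to poll @reg until (value & @mask) == @val.
 */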
void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

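/**
 * vcn_v2_0_enc_ring_emit_vm_flush - emit an enc VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid, then wait for the page table base
 * register write to take effect.
 */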
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
				vmid * hub->ctx_addr_distance,
				lower_32_bits(pd_addr), 0xffffffff);
}

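/**
 * vcn_v2_0_enc_ring_emit_wreg - emit an enc register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Ask the enc firmware to write @val to @reg on the ring's behalf.
 */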
void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

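/**
 * vcn_v2_0_set_interrupt_state - VCN interrupt state callback
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Required by the IRQ source interface; no per-type programming is
 * needed for VCN, so this is intentionally a no-op.
 */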
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

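/**
 * vcn_v2_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch fence processing to the decode or encode ring that matches
 * the interrupt source id.
 */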
static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

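/**
 * vcn_v2_0_dec_ring_test_ring - basic decode ring test
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a known value to a scratch register through the ring and poll
 * the register until it reads back, or time out.  Skipped under SR-IOV.
 */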
int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int vcn_v2_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

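/**
 * vcn_v2_0_start_mmsch - hand the init table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table to pass to the MMSCH
 *
 * Point the MMSCH at the descriptor table, kick off initialization and
 * poll the mailbox response until the MMSCH reports completion or the
 * retry budget is exhausted.
 */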
static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v2_0_init_header *header;
	uint32_t size;
	int i;

	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
	size = header->header_size + header->vcn_table_size;

	/* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	adev->vcn.inst->ring_dec.wptr = 0;
	adev->vcn.inst->ring_dec.wptr_old = 0;
	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		adev->vcn.inst->ring_enc[i].wptr = 0;
		adev->vcn.inst->ring_enc[i].wptr_old = 0;
		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
	}

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

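/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (firmware cache locations, stack and
 * context regions, ring buffer setup) if it has not been built yet, then
 * submit it to the MMSCH, which programs the hardware on behalf of the
 * virtual function.
 */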
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
	int r;
	uint32_t tmp;
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	struct mmsch_v2_0_init_header *header;
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint8_t i = 0;

	header = (struct mmsch_v2_0_init_header *)init_table;
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

		header->vcn_table_offset = header->header_size;

		init_table += header->vcn_table_offset;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			0xFFFFFFFF, 0x00000004);

		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			tmp = AMDGPU_UCODE_ID_VCN;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[tmp].tmr_mc_addr_lo);
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[tmp].tmr_mc_addr_hi);
			offset = 0;
		} else {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst->gpu_addr));
			offset = size;
		}

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
			ring = &adev->vcn.inst->ring_enc[r];
			ring->wptr = 0;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
				lower_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
				upper_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst->ring_dec;
		ring->wptr = 0;
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		tmp = sizeof(struct mmsch_v2_0_cmd_end);
		memcpy((void *)init_table, &end, tmp);
		table_size += (tmp / 4);
		header->vcn_table_size = table_size;
	}
	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};