1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/module.h>
29#include <linux/dmi.h>
30#include <linux/pci.h>
31#include <linux/debugfs.h>
32#include <drm/drm_drv.h>
33
34#include "amdgpu.h"
35#include "amdgpu_pm.h"
36#include "amdgpu_vcn.h"
37#include "soc15d.h"
38
39/* Firmware Names */
40#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
41#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
42#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
43#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
44#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
45#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
46#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
47#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
48#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
49#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
50#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
51#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
52#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
53#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
54#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
55#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
56#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
57#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
58#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
59#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
60#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
61#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
62
63MODULE_FIRMWARE(FIRMWARE_RAVEN);
64MODULE_FIRMWARE(FIRMWARE_PICASSO);
65MODULE_FIRMWARE(FIRMWARE_RAVEN2);
66MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
67MODULE_FIRMWARE(FIRMWARE_RENOIR);
68MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
69MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
70MODULE_FIRMWARE(FIRMWARE_NAVI10);
71MODULE_FIRMWARE(FIRMWARE_NAVI14);
72MODULE_FIRMWARE(FIRMWARE_NAVI12);
73MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
74MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
75MODULE_FIRMWARE(FIRMWARE_VANGOGH);
76MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
77MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
78MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
79MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
80MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
81MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
82MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
83MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
84MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
85
86static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
87
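/*
 * Early init: derive the VCN firmware name from the UVD/VCN IP version and
 * request the firmware image; release it again if the request fails.
 */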
88int amdgpu_vcn_early_init(struct amdgpu_device *adev)
89{
90 char ucode_prefix[30];
91 char fw_name[40];
92 int r;
93
94 amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
95 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
96 r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
97 if (r)
98 amdgpu_ucode_release(&adev->vcn.fw);
99
100 return r;
101}
102
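/*
 * Software init: parse the firmware header, then allocate one VCPU BO per
 * instance, sized for stack and context (plus the ucode when it is not
 * PSP-loaded), the fw_shared area and, optionally, the firmware log buffer;
 * a small DPG scratch BO is added when indirect SRAM mode is used.
 */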
103int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
104{
105 unsigned long bo_size;
106 const struct common_firmware_header *hdr;
107 unsigned char fw_check;
108 unsigned int fw_shared_size, log_offset;
109 int i, r;
110
111 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
112 mutex_init(&adev->vcn.vcn_pg_lock);
113 mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
114 atomic_set(&adev->vcn.total_submission_cnt, 0);
115 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
116 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
117
118 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
119 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
120 adev->vcn.indirect_sram = true;
121
122	/*
123	 * Some Steam Deck BIOS versions are incompatible with the
124	 * indirect SRAM mode, causing amdgpu to fail probing (and
125	 * even potentially crash the kernel). Hence, check for
126	 * these versions here - notice this is restricted to
127	 * Vangogh (the Deck's APU).
128	 */
129 if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
130 const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
131
132 if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
133 !strncmp("F7A0114", bios_ver, 7))) {
134 adev->vcn.indirect_sram = false;
135 dev_info(adev->dev,
136 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
137 }
138 }
139
140 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
141 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
142
143	/* Bits 20-23 encode the major version and are non-zero in the new naming
144	 * convention. In the old naming convention this field is part of the version
145	 * minor and DRM_DISABLED_FLAG. Since the latest version minor is 0x5B and
146	 * DRM_DISABLED_FLAG is zero in the old convention, this field is always zero
147	 * so far. These four bits are used to tell which naming convention is present.
148	 */
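	/*
	 * Worked example with a hypothetical value: ucode_version = 0x21123456 has
	 * non-zero bits 20-23 (0x1), so it decodes under the new convention as
	 * ENC: 1.35, DEC: 1, VEP: 2, Revision: 1110 (0x456).
	 */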
149 fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
150 if (fw_check) {
151 unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
152
153 fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
154 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
155 enc_major = fw_check;
156 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
157 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
158 DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
159 enc_major, enc_minor, dec_ver, vep, fw_rev);
160 } else {
161 unsigned int version_major, version_minor, family_id;
162
163 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
164 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
165 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
166 DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
167 version_major, version_minor, family_id);
168 }
169
170 bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
171 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
172 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
173
174 if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
175 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
176 log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
177 } else {
178 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
179 log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
180 }
181
182 bo_size += fw_shared_size;
183
184 if (amdgpu_vcnfw_log)
185 bo_size += AMDGPU_VCNFW_LOG_SIZE;
186
187 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
188 if (adev->vcn.harvest_config & (1 << i))
189 continue;
190
191 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
192 AMDGPU_GEM_DOMAIN_VRAM |
193 AMDGPU_GEM_DOMAIN_GTT,
194 &adev->vcn.inst[i].vcpu_bo,
195 &adev->vcn.inst[i].gpu_addr,
196 &adev->vcn.inst[i].cpu_addr);
197 if (r) {
198 dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
199 return r;
200 }
201
202 adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
203 bo_size - fw_shared_size;
204 adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
205 bo_size - fw_shared_size;
206
207 adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
208
209 if (amdgpu_vcnfw_log) {
210 adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
211 adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
212 adev->vcn.inst[i].fw_shared.log_offset = log_offset;
213 }
214
215 if (adev->vcn.indirect_sram) {
216 r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
217 AMDGPU_GEM_DOMAIN_VRAM |
218 AMDGPU_GEM_DOMAIN_GTT,
219 &adev->vcn.inst[i].dpg_sram_bo,
220 &adev->vcn.inst[i].dpg_sram_gpu_addr,
221 &adev->vcn.inst[i].dpg_sram_cpu_addr);
222 if (r) {
223 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
224 return r;
225 }
226 }
227 }
228
229 return 0;
230}
231
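/*
 * Software fini: release the DPG scratch and VCPU BOs, any suspend snapshot,
 * the decode and encode rings, and finally the firmware image itself.
 */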
232int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
233{
234 int i, j;
235
236 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
237 if (adev->vcn.harvest_config & (1 << j))
238 continue;
239
240 amdgpu_bo_free_kernel(
241 &adev->vcn.inst[j].dpg_sram_bo,
242 &adev->vcn.inst[j].dpg_sram_gpu_addr,
243 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
244
245 kvfree(adev->vcn.inst[j].saved_bo);
246
247 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
248 &adev->vcn.inst[j].gpu_addr,
249 (void **)&adev->vcn.inst[j].cpu_addr);
250
251 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
252
253 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
254 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
255 }
256
257 amdgpu_ucode_release(&adev->vcn.fw);
258 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
259 mutex_destroy(&adev->vcn.vcn_pg_lock);
260
261 return 0;
262}
263
264/* from vcn4 and above, only unified queue is used */
265static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
266{
267 struct amdgpu_device *adev = ring->adev;
268 bool ret = false;
269
270 if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
271 ret = true;
272
273 return ret;
274}
275
276bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
277{
278 bool ret = false;
279 int vcn_config = adev->vcn.vcn_config[vcn_instance];
280
281 if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
282 ret = true;
283 else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
284 ret = true;
285 else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
286 ret = true;
287
288 return ret;
289}
290
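/*
 * Suspend: cancel the idle worker and snapshot each VCPU BO into saved_bo so
 * amdgpu_vcn_resume() can restore it. On a RAS ATHUB error nothing is saved;
 * resume rebuilds the buffer from the firmware image instead.
 */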
291int amdgpu_vcn_suspend(struct amdgpu_device *adev)
292{
293 unsigned int size;
294 void *ptr;
295 int i, idx;
296
297 bool in_ras_intr = amdgpu_ras_intr_triggered();
298
299 cancel_delayed_work_sync(&adev->vcn.idle_work);
300
301 /* err_event_athub will corrupt VCPU buffer, so we need to
302 * restore fw data and clear buffer in amdgpu_vcn_resume() */
303 if (in_ras_intr)
304 return 0;
305
306 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
307 if (adev->vcn.harvest_config & (1 << i))
308 continue;
309 if (adev->vcn.inst[i].vcpu_bo == NULL)
310 return 0;
311
312 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
313 ptr = adev->vcn.inst[i].cpu_addr;
314
315 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
316 if (!adev->vcn.inst[i].saved_bo)
317 return -ENOMEM;
318
319 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
320 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
321 drm_dev_exit(idx);
322 }
323 }
324 return 0;
325}
326
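/*
 * Resume: restore the VCPU BO from the suspend snapshot when one exists,
 * otherwise re-copy the ucode (non-PSP load) and clear the remainder.
 */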
327int amdgpu_vcn_resume(struct amdgpu_device *adev)
328{
329 unsigned int size;
330 void *ptr;
331 int i, idx;
332
333 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
334 if (adev->vcn.harvest_config & (1 << i))
335 continue;
336 if (adev->vcn.inst[i].vcpu_bo == NULL)
337 return -EINVAL;
338
339 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
340 ptr = adev->vcn.inst[i].cpu_addr;
341
342 if (adev->vcn.inst[i].saved_bo != NULL) {
343 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
344 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
345 drm_dev_exit(idx);
346 }
347 kvfree(adev->vcn.inst[i].saved_bo);
348 adev->vcn.inst[i].saved_bo = NULL;
349 } else {
350 const struct common_firmware_header *hdr;
351 unsigned int offset;
352
353 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
354 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
355 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
356 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
357 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
358 le32_to_cpu(hdr->ucode_size_bytes));
359 drm_dev_exit(idx);
360 }
361 size -= le32_to_cpu(hdr->ucode_size_bytes);
362 ptr += le32_to_cpu(hdr->ucode_size_bytes);
363 }
364 memset_io(ptr, 0, size);
365 }
366 }
367 return 0;
368}
369
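/*
 * Idle worker: count the fences still outstanding on every instance, adjust
 * the DPG pause state accordingly, and once everything is quiet power-gate
 * VCN and drop the VIDEO power profile; otherwise re-arm the delayed work.
 */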
370static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
371{
372 struct amdgpu_device *adev =
373 container_of(work, struct amdgpu_device, vcn.idle_work.work);
374 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
375 unsigned int i, j;
376 int r = 0;
377
378 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
379 if (adev->vcn.harvest_config & (1 << j))
380 continue;
381
382 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
383 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
384
385 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
386 struct dpg_pause_state new_state;
387
388 if (fence[j] ||
389 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
390 new_state.fw_based = VCN_DPG_STATE__PAUSE;
391 else
392 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
393
394 adev->vcn.pause_dpg_mode(adev, j, &new_state);
395 }
396
397 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
398 fences += fence[j];
399 }
400
401 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
402 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
403 AMD_PG_STATE_GATE);
404 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
405 false);
406 if (r)
407 dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
408 } else {
409 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
410 }
411}
412
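/*
 * Called before ring submission: bump the submission counters, switch to the
 * VIDEO power profile if the idle worker was not already pending, ungate VCN
 * and pick the DPG pause state that matches the ring being used.
 */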
413void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
414{
415 struct amdgpu_device *adev = ring->adev;
416 int r = 0;
417
418 atomic_inc(&adev->vcn.total_submission_cnt);
419
420 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
421 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
422 true);
423 if (r)
424 dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
425 }
426
427 mutex_lock(&adev->vcn.vcn_pg_lock);
428 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
429 AMD_PG_STATE_UNGATE);
430
431 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
432 struct dpg_pause_state new_state;
433
434 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
435 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
436 new_state.fw_based = VCN_DPG_STATE__PAUSE;
437 } else {
438 unsigned int fences = 0;
439 unsigned int i;
440
441 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
442 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
443
444 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
445 new_state.fw_based = VCN_DPG_STATE__PAUSE;
446 else
447 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
448 }
449
450 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
451 }
452 mutex_unlock(&adev->vcn.vcn_pg_lock);
453}
454
455void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
456{
457 if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
458 ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
459 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
460
461 atomic_dec(&ring->adev->vcn.total_submission_cnt);
462
463 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
464}
465
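/*
 * Decode ring smoke test: prime scratch9 with 0xCAFEDEAD, submit a packet
 * that writes 0xDEADBEEF to it and poll until the value lands or the timeout
 * expires. Skipped under SR-IOV, which has no direct register access.
 */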
466int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
467{
468 struct amdgpu_device *adev = ring->adev;
469 uint32_t tmp = 0;
470 unsigned int i;
471 int r;
472
473 /* VCN in SRIOV does not support direct register read/write */
474 if (amdgpu_sriov_vf(adev))
475 return 0;
476
477 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
478 r = amdgpu_ring_alloc(ring, 3);
479 if (r)
480 return r;
481 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
482 amdgpu_ring_write(ring, 0xDEADBEEF);
483 amdgpu_ring_commit(ring);
484 for (i = 0; i < adev->usec_timeout; i++) {
485 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
486 if (tmp == 0xDEADBEEF)
487 break;
488 udelay(1);
489 }
490
491 if (i >= adev->usec_timeout)
492 r = -ETIMEDOUT;
493
494 return r;
495}
496
497int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
498{
499 struct amdgpu_device *adev = ring->adev;
500 uint32_t rptr;
501 unsigned int i;
502 int r;
503
504 if (amdgpu_sriov_vf(adev))
505 return 0;
506
507 r = amdgpu_ring_alloc(ring, 16);
508 if (r)
509 return r;
510
511 rptr = amdgpu_ring_get_rptr(ring);
512
513 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
514 amdgpu_ring_commit(ring);
515
516 for (i = 0; i < adev->usec_timeout; i++) {
517 if (amdgpu_ring_get_rptr(ring) != rptr)
518 break;
519 udelay(1);
520 }
521
522 if (i >= adev->usec_timeout)
523 r = -ETIMEDOUT;
524
525 return r;
526}
527
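/*
 * Submit a decoder message: build a small direct IB that points data0/data1
 * at the message buffer, pad it with NOPs, submit it and hand back the fence.
 */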
528static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
529 struct amdgpu_ib *ib_msg,
530 struct dma_fence **fence)
531{
532 u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
533 struct amdgpu_device *adev = ring->adev;
534 struct dma_fence *f = NULL;
535 struct amdgpu_job *job;
536 struct amdgpu_ib *ib;
537 int i, r;
538
539 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
540 64, AMDGPU_IB_POOL_DIRECT,
541 &job);
542 if (r)
543 goto err;
544
545 ib = &job->ibs[0];
546 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
547 ib->ptr[1] = addr;
548 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
549 ib->ptr[3] = addr >> 32;
550 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
551 ib->ptr[5] = 0;
552 for (i = 6; i < 16; i += 2) {
553 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
554 ib->ptr[i+1] = 0;
555 }
556 ib->length_dw = 16;
557
558 r = amdgpu_job_submit_direct(job, ring, &f);
559 if (r)
560 goto err_free;
561
562 amdgpu_ib_free(adev, ib_msg, f);
563
564 if (fence)
565 *fence = dma_fence_get(f);
566 dma_fence_put(f);
567
568 return 0;
569
570err_free:
571 amdgpu_job_free(job);
572err:
573 amdgpu_ib_free(adev, ib_msg, f);
574 return r;
575}
576
577static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
578 struct amdgpu_ib *ib)
579{
580 struct amdgpu_device *adev = ring->adev;
581 uint32_t *msg;
582 int r, i;
583
584 memset(ib, 0, sizeof(*ib));
585 r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
586 AMDGPU_IB_POOL_DIRECT,
587 ib);
588 if (r)
589 return r;
590
591 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
592 msg[0] = cpu_to_le32(0x00000028);
593 msg[1] = cpu_to_le32(0x00000038);
594 msg[2] = cpu_to_le32(0x00000001);
595 msg[3] = cpu_to_le32(0x00000000);
596 msg[4] = cpu_to_le32(handle);
597 msg[5] = cpu_to_le32(0x00000000);
598 msg[6] = cpu_to_le32(0x00000001);
599 msg[7] = cpu_to_le32(0x00000028);
600 msg[8] = cpu_to_le32(0x00000010);
601 msg[9] = cpu_to_le32(0x00000000);
602 msg[10] = cpu_to_le32(0x00000007);
603 msg[11] = cpu_to_le32(0x00000000);
604 msg[12] = cpu_to_le32(0x00000780);
605 msg[13] = cpu_to_le32(0x00000440);
606 for (i = 14; i < 1024; ++i)
607 msg[i] = cpu_to_le32(0x0);
608
609 return 0;
610}
611
612static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
613 struct amdgpu_ib *ib)
614{
615 struct amdgpu_device *adev = ring->adev;
616 uint32_t *msg;
617 int r, i;
618
619 memset(ib, 0, sizeof(*ib));
620 r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
621 AMDGPU_IB_POOL_DIRECT,
622 ib);
623 if (r)
624 return r;
625
626 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
627 msg[0] = cpu_to_le32(0x00000028);
628 msg[1] = cpu_to_le32(0x00000018);
629 msg[2] = cpu_to_le32(0x00000000);
630 msg[3] = cpu_to_le32(0x00000002);
631 msg[4] = cpu_to_le32(handle);
632 msg[5] = cpu_to_le32(0x00000000);
633 for (i = 6; i < 1024; ++i)
634 msg[i] = cpu_to_le32(0x0);
635
636 return 0;
637}
638
639int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
640{
641 struct dma_fence *fence = NULL;
642 struct amdgpu_ib ib;
643 long r;
644
645 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
646 if (r)
647 goto error;
648
649 r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
650 if (r)
651 goto error;
652 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
653 if (r)
654 goto error;
655
656 r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
657 if (r)
658 goto error;
659
660 r = dma_fence_wait_timeout(fence, false, timeout);
661 if (r == 0)
662 r = -ETIMEDOUT;
663 else if (r > 0)
664 r = 0;
665
666 dma_fence_put(fence);
667error:
668 return r;
669}
670
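/*
 * Unified-queue IB header: emit the single-queue checksum packet followed by
 * the engine-info packet (encode vs. decode) and return the location of the
 * checksum dword so it can be patched once the payload is known.
 */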
671static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
672 uint32_t ib_pack_in_dw, bool enc)
673{
674 uint32_t *ib_checksum;
675
676 ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
677 ib->ptr[ib->length_dw++] = 0x30000002;
678 ib_checksum = &ib->ptr[ib->length_dw++];
679 ib->ptr[ib->length_dw++] = ib_pack_in_dw;
680
681 ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
682 ib->ptr[ib->length_dw++] = 0x30000001;
683 ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
684 ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
685
686 return ib_checksum;
687}
688
689static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
690 uint32_t ib_pack_in_dw)
691{
692 uint32_t i;
693 uint32_t checksum = 0;
694
695 for (i = 0; i < ib_pack_in_dw; i++)
696 checksum += *(*ib_checksum + 2 + i);
697
698 **ib_checksum = checksum;
699}
700
701static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
702 struct amdgpu_ib *ib_msg,
703 struct dma_fence **fence)
704{
705 struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
706 unsigned int ib_size_dw = 64;
707 struct amdgpu_device *adev = ring->adev;
708 struct dma_fence *f = NULL;
709 struct amdgpu_job *job;
710 struct amdgpu_ib *ib;
711 uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
712 bool sq = amdgpu_vcn_using_unified_queue(ring);
713 uint32_t *ib_checksum;
714 uint32_t ib_pack_in_dw;
715 int i, r;
716
717 if (sq)
718 ib_size_dw += 8;
719
720 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
721 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
722 &job);
723 if (r)
724 goto err;
725
726 ib = &job->ibs[0];
727 ib->length_dw = 0;
728
729 /* single queue headers */
730 if (sq) {
731 ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
732 + 4 + 2; /* engine info + decoding ib in dw */
733 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
734 }
735
736 ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
737 ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
738 decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
739 ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
740 memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
741
742 decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
743 decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
744 decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
745
746 for (i = ib->length_dw; i < ib_size_dw; ++i)
747 ib->ptr[i] = 0x0;
748
749 if (sq)
750 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
751
752 r = amdgpu_job_submit_direct(job, ring, &f);
753 if (r)
754 goto err_free;
755
756 amdgpu_ib_free(adev, ib_msg, f);
757
758 if (fence)
759 *fence = dma_fence_get(f);
760 dma_fence_put(f);
761
762 return 0;
763
764err_free:
765 amdgpu_job_free(job);
766err:
767 amdgpu_ib_free(adev, ib_msg, f);
768 return r;
769}
770
771int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
772{
773 struct dma_fence *fence = NULL;
774 struct amdgpu_ib ib;
775 long r;
776
777 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
778 if (r)
779 goto error;
780
781 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
782 if (r)
783 goto error;
784 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
785 if (r)
786 goto error;
787
788 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
789 if (r)
790 goto error;
791
792 r = dma_fence_wait_timeout(fence, false, timeout);
793 if (r == 0)
794 r = -ETIMEDOUT;
795 else if (r > 0)
796 r = 0;
797
798 dma_fence_put(fence);
799error:
800 return r;
801}
802
803int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
804{
805 struct amdgpu_device *adev = ring->adev;
806 uint32_t rptr;
807 unsigned int i;
808 int r;
809
810 if (amdgpu_sriov_vf(adev))
811 return 0;
812
813 r = amdgpu_ring_alloc(ring, 16);
814 if (r)
815 return r;
816
817 rptr = amdgpu_ring_get_rptr(ring);
818
819 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
820 amdgpu_ring_commit(ring);
821
822 for (i = 0; i < adev->usec_timeout; i++) {
823 if (amdgpu_ring_get_rptr(ring) != rptr)
824 break;
825 udelay(1);
826 }
827
828 if (i >= adev->usec_timeout)
829 r = -ETIMEDOUT;
830
831 return r;
832}
833
834static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
835 struct amdgpu_ib *ib_msg,
836 struct dma_fence **fence)
837{
838 unsigned int ib_size_dw = 16;
839 struct amdgpu_job *job;
840 struct amdgpu_ib *ib;
841 struct dma_fence *f = NULL;
842 uint32_t *ib_checksum = NULL;
843 uint64_t addr;
844 bool sq = amdgpu_vcn_using_unified_queue(ring);
845 int i, r;
846
847 if (sq)
848 ib_size_dw += 8;
849
850 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
851 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
852 &job);
853 if (r)
854 return r;
855
856 ib = &job->ibs[0];
857 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
858
859 ib->length_dw = 0;
860
861 if (sq)
862 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
863
864 ib->ptr[ib->length_dw++] = 0x00000018;
865 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
866 ib->ptr[ib->length_dw++] = handle;
867 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
868 ib->ptr[ib->length_dw++] = addr;
869 ib->ptr[ib->length_dw++] = 0x0000000b;
870
871 ib->ptr[ib->length_dw++] = 0x00000014;
872 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
873 ib->ptr[ib->length_dw++] = 0x0000001c;
874 ib->ptr[ib->length_dw++] = 0x00000000;
875 ib->ptr[ib->length_dw++] = 0x00000000;
876
877 ib->ptr[ib->length_dw++] = 0x00000008;
878 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
879
880 for (i = ib->length_dw; i < ib_size_dw; ++i)
881 ib->ptr[i] = 0x0;
882
883 if (sq)
884 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
885
886 r = amdgpu_job_submit_direct(job, ring, &f);
887 if (r)
888 goto err;
889
890 if (fence)
891 *fence = dma_fence_get(f);
892 dma_fence_put(f);
893
894 return 0;
895
896err:
897 amdgpu_job_free(job);
898 return r;
899}
900
901static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
902 struct amdgpu_ib *ib_msg,
903 struct dma_fence **fence)
904{
905 unsigned int ib_size_dw = 16;
906 struct amdgpu_job *job;
907 struct amdgpu_ib *ib;
908 struct dma_fence *f = NULL;
909 uint32_t *ib_checksum = NULL;
910 uint64_t addr;
911 bool sq = amdgpu_vcn_using_unified_queue(ring);
912 int i, r;
913
914 if (sq)
915 ib_size_dw += 8;
916
917 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
918 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
919 &job);
920 if (r)
921 return r;
922
923 ib = &job->ibs[0];
924 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
925
926 ib->length_dw = 0;
927
928 if (sq)
929 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
930
931 ib->ptr[ib->length_dw++] = 0x00000018;
932 ib->ptr[ib->length_dw++] = 0x00000001;
933 ib->ptr[ib->length_dw++] = handle;
934 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
935 ib->ptr[ib->length_dw++] = addr;
936 ib->ptr[ib->length_dw++] = 0x0000000b;
937
938 ib->ptr[ib->length_dw++] = 0x00000014;
939 ib->ptr[ib->length_dw++] = 0x00000002;
940 ib->ptr[ib->length_dw++] = 0x0000001c;
941 ib->ptr[ib->length_dw++] = 0x00000000;
942 ib->ptr[ib->length_dw++] = 0x00000000;
943
944 ib->ptr[ib->length_dw++] = 0x00000008;
945 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
946
947 for (i = ib->length_dw; i < ib_size_dw; ++i)
948 ib->ptr[i] = 0x0;
949
950 if (sq)
951 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
952
953 r = amdgpu_job_submit_direct(job, ring, &f);
954 if (r)
955 goto err;
956
957 if (fence)
958 *fence = dma_fence_get(f);
959 dma_fence_put(f);
960
961 return 0;
962
963err:
964 amdgpu_job_free(job);
965 return r;
966}
967
968int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
969{
970 struct amdgpu_device *adev = ring->adev;
971 struct dma_fence *fence = NULL;
972 struct amdgpu_ib ib;
973 long r;
974
975 memset(&ib, 0, sizeof(ib));
976 r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
977 AMDGPU_IB_POOL_DIRECT,
978 &ib);
979 if (r)
980 return r;
981
982 r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
983 if (r)
984 goto error;
985
986 r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
987 if (r)
988 goto error;
989
990 r = dma_fence_wait_timeout(fence, false, timeout);
991 if (r == 0)
992 r = -ETIMEDOUT;
993 else if (r > 0)
994 r = 0;
995
996error:
997 amdgpu_ib_free(adev, &ib, fence);
998 dma_fence_put(fence);
999
1000 return r;
1001}
1002
1003int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1004{
1005 struct amdgpu_device *adev = ring->adev;
1006 long r;
1007
1008 if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
1009 r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1010 if (r)
1011 goto error;
1012 }
1013
1014 r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1015
1016error:
1017 return r;
1018}
1019
1020enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1021{
1022 switch (ring) {
1023 case 0:
1024 return AMDGPU_RING_PRIO_0;
1025 case 1:
1026 return AMDGPU_RING_PRIO_1;
1027 case 2:
1028 return AMDGPU_RING_PRIO_2;
1029 default:
1030 return AMDGPU_RING_PRIO_0;
1031 }
1032}
1033
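/*
 * Register the VCN firmware with the PSP loader. At most two FW slots are
 * filled, and VCN 4.0.3 shares a single image across all of its instances.
 */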
1034void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1035{
1036 int i;
1037 unsigned int idx;
1038
1039 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1040 const struct common_firmware_header *hdr;
1041
1042 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1043
1044 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1045 if (adev->vcn.harvest_config & (1 << i))
1046 continue;
1047			/* currently only 2 FW instances are supported */
1048			if (i >= 2) {
1049				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1050 break;
1051 }
1052 idx = AMDGPU_UCODE_ID_VCN + i;
1053 adev->firmware.ucode[idx].ucode_id = idx;
1054 adev->firmware.ucode[idx].fw = adev->vcn.fw;
1055 adev->firmware.fw_size +=
1056 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1057
1058 if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1059 IP_VERSION(4, 0, 3))
1060 break;
1061 }
1062 dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1063 }
1064}
1065
1066/*
1067 * debugfs for mapping vcn firmware log buffer.
1068 */
1069#if defined(CONFIG_DEBUG_FS)
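/*
 * The log is a ring buffer: rptr/wptr live in the amdgpu_vcn_fwlog header at
 * the start of the buffer, and reads wrap back to just past that header once
 * they reach AMDGPU_VCNFW_LOG_SIZE.
 */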
1070static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1071 size_t size, loff_t *pos)
1072{
1073 struct amdgpu_vcn_inst *vcn;
1074 void *log_buf;
1075 volatile struct amdgpu_vcn_fwlog *plog;
1076 unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1077 unsigned int read_num[2] = {0};
1078
1079 vcn = file_inode(f)->i_private;
1080 if (!vcn)
1081 return -ENODEV;
1082
1083 if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1084 return -EFAULT;
1085
1086 log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1087
1088 plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1089 read_pos = plog->rptr;
1090 write_pos = plog->wptr;
1091
1092 if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1093 return -EFAULT;
1094
1095 if (!size || (read_pos == write_pos))
1096 return 0;
1097
1098 if (write_pos > read_pos) {
1099 available = write_pos - read_pos;
1100 read_num[0] = min_t(size_t, size, available);
1101 } else {
1102 read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1103 available = read_num[0] + write_pos - plog->header_size;
1104 if (size > available)
1105 read_num[1] = write_pos - plog->header_size;
1106 else if (size > read_num[0])
1107 read_num[1] = size - read_num[0];
1108 else
1109 read_num[0] = size;
1110 }
1111
1112 for (i = 0; i < 2; i++) {
1113 if (read_num[i]) {
1114 if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1115 read_pos = plog->header_size;
1116 if (read_num[i] == copy_to_user((buf + read_bytes),
1117 (log_buf + read_pos), read_num[i]))
1118 return -EFAULT;
1119
1120 read_bytes += read_num[i];
1121 read_pos += read_num[i];
1122 }
1123 }
1124
1125 plog->rptr = read_pos;
1126 *pos += read_bytes;
1127 return read_bytes;
1128}
1129
1130static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1131 .owner = THIS_MODULE,
1132 .read = amdgpu_debugfs_vcn_fwlog_read,
1133 .llseek = default_llseek
1134};
1135#endif
1136
1137void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1138 struct amdgpu_vcn_inst *vcn)
1139{
1140#if defined(CONFIG_DEBUG_FS)
1141 struct drm_minor *minor = adev_to_drm(adev)->primary;
1142 struct dentry *root = minor->debugfs_root;
1143 char name[32];
1144
1145 sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1146 debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1147 &amdgpu_debugfs_vcnfwlog_fops,
1148 AMDGPU_VCNFW_LOG_SIZE);
1149#endif
1150}
1151
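/*
 * Tell the firmware where to write its log: set the logging flag and the
 * buffer address/size in fw_shared, then initialize the ring-buffer header
 * at the start of the log buffer itself.
 */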
1152void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1153{
1154#if defined(CONFIG_DEBUG_FS)
1155 volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1156 void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1157 uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1158 volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1159 volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1160 + vcn->fw_shared.log_offset;
1161 *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1162 fw_log->is_enabled = 1;
1163 fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1164 fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1165 fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1166
1167 log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1168 log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1169 log_buf->rptr = log_buf->header_size;
1170 log_buf->wptr = log_buf->header_size;
1171 log_buf->wrapped = 0;
1172#endif
1173}
1174
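/*
 * Poison interrupt handler: on bare metal forward the event to the RAS
 * dispatcher; under SR-IOV defer to the host's ras_poison_handler when one
 * is registered.
 */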
1175int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1176 struct amdgpu_irq_src *source,
1177 struct amdgpu_iv_entry *entry)
1178{
1179 struct ras_common_if *ras_if = adev->vcn.ras_if;
1180 struct ras_dispatch_if ih_data = {
1181 .entry = entry,
1182 };
1183
1184 if (!ras_if)
1185 return 0;
1186
1187 if (!amdgpu_sriov_vf(adev)) {
1188 ih_data.head = *ras_if;
1189 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1190 } else {
1191 if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1192 adev->virt.ops->ras_poison_handler(adev);
1193 else
1194 dev_warn(adev->dev,
1195 "No ras_poison_handler interface in SRIOV for VCN!\n");
1196 }
1197
1198 return 0;
1199}
1200
1201int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1202{
1203 int r, i;
1204
1205 r = amdgpu_ras_block_late_init(adev, ras_block);
1206 if (r)
1207 return r;
1208
1209 if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1210 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1211 if (adev->vcn.harvest_config & (1 << i) ||
1212 !adev->vcn.inst[i].ras_poison_irq.funcs)
1213 continue;
1214
1215 r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1216 if (r)
1217 goto late_fini;
1218 }
1219 }
1220 return 0;
1221
1222late_fini:
1223 amdgpu_ras_block_late_fini(adev, ras_block);
1224 return r;
1225}
1226
1227int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1228{
1229 int err;
1230 struct amdgpu_vcn_ras *ras;
1231
1232 if (!adev->vcn.ras)
1233 return 0;
1234
1235 ras = adev->vcn.ras;
1236 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1237 if (err) {
1238 dev_err(adev->dev, "Failed to register vcn ras block!\n");
1239 return err;
1240 }
1241
1242 strcpy(ras->ras_block.ras_comm.name, "vcn");
1243 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1244 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1245 adev->vcn.ras_if = &ras->ras_block.ras_comm;
1246
1247 if (!ras->ras_block.ras_late_init)
1248 ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1249
1250 return 0;
1251}
1252
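/*
 * Hand the DPG register-programming stream gathered in the dpg_sram BO to the
 * PSP so it is written into this instance's VCN RAM (VCN0_RAM/VCN1_RAM unless
 * an explicit ucode_id is supplied).
 */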
1253int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1254 enum AMDGPU_UCODE_ID ucode_id)
1255{
1256 struct amdgpu_firmware_info ucode = {
1257 .ucode_id = (ucode_id ? ucode_id :
1258 (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1259 AMDGPU_UCODE_ID_VCN0_RAM)),
1260 .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1261 .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1262 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1263 };
1264
1265 return psp_execute_ip_fw_load(&adev->psp, &ucode);
1266}
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/debugfs.h>
31#include <drm/drm_drv.h>
32
33#include "amdgpu.h"
34#include "amdgpu_pm.h"
35#include "amdgpu_vcn.h"
36#include "soc15d.h"
37
38/* Firmware Names */
39#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
40#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
41#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
42#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
43#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
44#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
45#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
46#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
47#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
48#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
49#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
50#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
51#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
52#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
53#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
54#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
55#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
56#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
57#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
58#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
59
60MODULE_FIRMWARE(FIRMWARE_RAVEN);
61MODULE_FIRMWARE(FIRMWARE_PICASSO);
62MODULE_FIRMWARE(FIRMWARE_RAVEN2);
63MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
64MODULE_FIRMWARE(FIRMWARE_RENOIR);
65MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
66MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
67MODULE_FIRMWARE(FIRMWARE_NAVI10);
68MODULE_FIRMWARE(FIRMWARE_NAVI14);
69MODULE_FIRMWARE(FIRMWARE_NAVI12);
70MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
71MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
72MODULE_FIRMWARE(FIRMWARE_VANGOGH);
73MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
74MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
75MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
76MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
77MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
78MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
79MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
80
81static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
82
83int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
84{
85 unsigned long bo_size;
86 const char *fw_name;
87 const struct common_firmware_header *hdr;
88 unsigned char fw_check;
89 unsigned int fw_shared_size, log_offset;
90 int i, r;
91
92 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
93 mutex_init(&adev->vcn.vcn_pg_lock);
94 mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
95 atomic_set(&adev->vcn.total_submission_cnt, 0);
96 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
97 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
98
99 switch (adev->ip_versions[UVD_HWIP][0]) {
100 case IP_VERSION(1, 0, 0):
101 case IP_VERSION(1, 0, 1):
102 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
103 fw_name = FIRMWARE_RAVEN2;
104 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
105 fw_name = FIRMWARE_PICASSO;
106 else
107 fw_name = FIRMWARE_RAVEN;
108 break;
109 case IP_VERSION(2, 5, 0):
110 fw_name = FIRMWARE_ARCTURUS;
111 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
112 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
113 adev->vcn.indirect_sram = true;
114 break;
115 case IP_VERSION(2, 2, 0):
116 if (adev->apu_flags & AMD_APU_IS_RENOIR)
117 fw_name = FIRMWARE_RENOIR;
118 else
119 fw_name = FIRMWARE_GREEN_SARDINE;
120
121 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
122 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
123 adev->vcn.indirect_sram = true;
124 break;
125 case IP_VERSION(2, 6, 0):
126 fw_name = FIRMWARE_ALDEBARAN;
127 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
128 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
129 adev->vcn.indirect_sram = true;
130 break;
131 case IP_VERSION(2, 0, 0):
132 fw_name = FIRMWARE_NAVI10;
133 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
134 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
135 adev->vcn.indirect_sram = true;
136 break;
137 case IP_VERSION(2, 0, 2):
138 if (adev->asic_type == CHIP_NAVI12)
139 fw_name = FIRMWARE_NAVI12;
140 else
141 fw_name = FIRMWARE_NAVI14;
142 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
143 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
144 adev->vcn.indirect_sram = true;
145 break;
146 case IP_VERSION(3, 0, 0):
147 case IP_VERSION(3, 0, 64):
148 case IP_VERSION(3, 0, 192):
149 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
150 fw_name = FIRMWARE_SIENNA_CICHLID;
151 else
152 fw_name = FIRMWARE_NAVY_FLOUNDER;
153 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
154 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
155 adev->vcn.indirect_sram = true;
156 break;
157 case IP_VERSION(3, 0, 2):
158 fw_name = FIRMWARE_VANGOGH;
159 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
160 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
161 adev->vcn.indirect_sram = true;
162 break;
163 case IP_VERSION(3, 0, 16):
164 fw_name = FIRMWARE_DIMGREY_CAVEFISH;
165 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
166 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
167 adev->vcn.indirect_sram = true;
168 break;
169 case IP_VERSION(3, 0, 33):
170 fw_name = FIRMWARE_BEIGE_GOBY;
171 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
172 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
173 adev->vcn.indirect_sram = true;
174 break;
175 case IP_VERSION(3, 1, 1):
176 fw_name = FIRMWARE_YELLOW_CARP;
177 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
178 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
179 adev->vcn.indirect_sram = true;
180 break;
181 case IP_VERSION(3, 1, 2):
182 fw_name = FIRMWARE_VCN_3_1_2;
183 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
184 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
185 adev->vcn.indirect_sram = true;
186 break;
187 case IP_VERSION(4, 0, 0):
188 fw_name = FIRMWARE_VCN4_0_0;
189 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
190 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
191 adev->vcn.indirect_sram = true;
192 break;
193 case IP_VERSION(4, 0, 2):
194 fw_name = FIRMWARE_VCN4_0_2;
195 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
196 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
197 adev->vcn.indirect_sram = true;
198 break;
199 case IP_VERSION(4, 0, 4):
200 fw_name = FIRMWARE_VCN4_0_4;
201 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
202 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
203 adev->vcn.indirect_sram = true;
204 break;
205 default:
206 return -EINVAL;
207 }
208
209 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
210 if (r) {
211 dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
212 fw_name);
213 return r;
214 }
215
216 r = amdgpu_ucode_validate(adev->vcn.fw);
217 if (r) {
218 dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
219 fw_name);
220 release_firmware(adev->vcn.fw);
221 adev->vcn.fw = NULL;
222 return r;
223 }
224
225 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
226 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
227
228 /* Bit 20-23, it is encode major and non-zero for new naming convention.
229 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
230 * convention. Since the l:wq!atest version minor is 0x5B and DRM_DISABLED_FLAG
231 * is zero in old naming convention, this field is always zero so far.
232 * These four bits are used to tell which naming convention is present.
233 */
234 fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
235 if (fw_check) {
236 unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
237
238 fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
239 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
240 enc_major = fw_check;
241 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
242 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
243 DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
244 enc_major, enc_minor, dec_ver, vep, fw_rev);
245 } else {
246 unsigned int version_major, version_minor, family_id;
247
248 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
249 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
250 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
251 DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
252 version_major, version_minor, family_id);
253 }
254
255 bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
256 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
257 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
258
259 if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)){
260 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
261 log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
262 } else {
263 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
264 log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
265 }
266
267 bo_size += fw_shared_size;
268
269 if (amdgpu_vcnfw_log)
270 bo_size += AMDGPU_VCNFW_LOG_SIZE;
271
272 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
273 if (adev->vcn.harvest_config & (1 << i))
274 continue;
275
276 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
277 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
278 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
279 if (r) {
280 dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
281 return r;
282 }
283
284 adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
285 bo_size - fw_shared_size;
286 adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
287 bo_size - fw_shared_size;
288
289 adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
290
291 if (amdgpu_vcnfw_log) {
292 adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
293 adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
294 adev->vcn.inst[i].fw_shared.log_offset = log_offset;
295 }
296
297 if (adev->vcn.indirect_sram) {
298 r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
299 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
300 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
301 if (r) {
302 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
303 return r;
304 }
305 }
306 }
307
308 return 0;
309}
310
311int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
312{
313 int i, j;
314
315 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
316 if (adev->vcn.harvest_config & (1 << j))
317 continue;
318
319 if (adev->vcn.indirect_sram) {
320 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
321 &adev->vcn.inst[j].dpg_sram_gpu_addr,
322 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
323 }
324 kvfree(adev->vcn.inst[j].saved_bo);
325
326 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
327 &adev->vcn.inst[j].gpu_addr,
328 (void **)&adev->vcn.inst[j].cpu_addr);
329
330 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
331
332 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
333 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
334 }
335
336 release_firmware(adev->vcn.fw);
337 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
338 mutex_destroy(&adev->vcn.vcn_pg_lock);
339
340 return 0;
341}
342
343/* from vcn4 and above, only unified queue is used */
344static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
345{
346 struct amdgpu_device *adev = ring->adev;
347 bool ret = false;
348
349 if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
350 ret = true;
351
352 return ret;
353}
354
355bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
356{
357 bool ret = false;
358 int vcn_config = adev->vcn.vcn_config[vcn_instance];
359
360 if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
361 ret = true;
362 } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
363 ret = true;
364 } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
365 ret = true;
366 }
367
368 return ret;
369}
370
371int amdgpu_vcn_suspend(struct amdgpu_device *adev)
372{
373 unsigned size;
374 void *ptr;
375 int i, idx;
376
377 cancel_delayed_work_sync(&adev->vcn.idle_work);
378
379 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
380 if (adev->vcn.harvest_config & (1 << i))
381 continue;
382 if (adev->vcn.inst[i].vcpu_bo == NULL)
383 return 0;
384
385 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
386 ptr = adev->vcn.inst[i].cpu_addr;
387
388 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
389 if (!adev->vcn.inst[i].saved_bo)
390 return -ENOMEM;
391
392 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
393 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
394 drm_dev_exit(idx);
395 }
396 }
397 return 0;
398}
399
400int amdgpu_vcn_resume(struct amdgpu_device *adev)
401{
402 unsigned size;
403 void *ptr;
404 int i, idx;
405
406 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
407 if (adev->vcn.harvest_config & (1 << i))
408 continue;
409 if (adev->vcn.inst[i].vcpu_bo == NULL)
410 return -EINVAL;
411
412 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
413 ptr = adev->vcn.inst[i].cpu_addr;
414
415 if (adev->vcn.inst[i].saved_bo != NULL) {
416 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
417 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
418 drm_dev_exit(idx);
419 }
420 kvfree(adev->vcn.inst[i].saved_bo);
421 adev->vcn.inst[i].saved_bo = NULL;
422 } else {
423 const struct common_firmware_header *hdr;
424 unsigned offset;
425
426 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
427 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
428 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
429 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
430 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
431 le32_to_cpu(hdr->ucode_size_bytes));
432 drm_dev_exit(idx);
433 }
434 size -= le32_to_cpu(hdr->ucode_size_bytes);
435 ptr += le32_to_cpu(hdr->ucode_size_bytes);
436 }
437 memset_io(ptr, 0, size);
438 }
439 }
440 return 0;
441}
442
443static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
444{
445 struct amdgpu_device *adev =
446 container_of(work, struct amdgpu_device, vcn.idle_work.work);
447 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
448 unsigned int i, j;
449 int r = 0;
450
451 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
452 if (adev->vcn.harvest_config & (1 << j))
453 continue;
454
455 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
456 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
457 }
458
459 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
460 struct dpg_pause_state new_state;
461
462 if (fence[j] ||
463 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
464 new_state.fw_based = VCN_DPG_STATE__PAUSE;
465 else
466 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
467
468 adev->vcn.pause_dpg_mode(adev, j, &new_state);
469 }
470
471 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
472 fences += fence[j];
473 }
474
475 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
476 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
477 AMD_PG_STATE_GATE);
478 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
479 false);
480 if (r)
481 dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
482 } else {
483 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
484 }
485}
486
487void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
488{
489 struct amdgpu_device *adev = ring->adev;
490 int r = 0;
491
492 atomic_inc(&adev->vcn.total_submission_cnt);
493
494 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
495 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
496 true);
497 if (r)
498 dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
499 }
500
501 mutex_lock(&adev->vcn.vcn_pg_lock);
502 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
503 AMD_PG_STATE_UNGATE);
504
505 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
506 struct dpg_pause_state new_state;
507
508 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
509 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
510 new_state.fw_based = VCN_DPG_STATE__PAUSE;
511 } else {
512 unsigned int fences = 0;
513 unsigned int i;
514
515 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
516 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
517
518 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
519 new_state.fw_based = VCN_DPG_STATE__PAUSE;
520 else
521 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
522 }
523
524 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
525 }
526 mutex_unlock(&adev->vcn.vcn_pg_lock);
527}
528
529void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
530{
531 if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
532 ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
533 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
534
535 atomic_dec(&ring->adev->vcn.total_submission_cnt);
536
537 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
538}
539
540int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
541{
542 struct amdgpu_device *adev = ring->adev;
543 uint32_t tmp = 0;
544 unsigned i;
545 int r;
546
547 /* VCN in SRIOV does not support direct register read/write */
548 if (amdgpu_sriov_vf(adev))
549 return 0;
550
551 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
552 r = amdgpu_ring_alloc(ring, 3);
553 if (r)
554 return r;
555 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
556 amdgpu_ring_write(ring, 0xDEADBEEF);
557 amdgpu_ring_commit(ring);
558 for (i = 0; i < adev->usec_timeout; i++) {
559 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
560 if (tmp == 0xDEADBEEF)
561 break;
562 udelay(1);
563 }
564
565 if (i >= adev->usec_timeout)
566 r = -ETIMEDOUT;
567
568 return r;
569}
570
571int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
572{
573 struct amdgpu_device *adev = ring->adev;
574 uint32_t rptr;
575 unsigned int i;
576 int r;
577
578 if (amdgpu_sriov_vf(adev))
579 return 0;
580
581 r = amdgpu_ring_alloc(ring, 16);
582 if (r)
583 return r;
584
585 rptr = amdgpu_ring_get_rptr(ring);
586
587 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
588 amdgpu_ring_commit(ring);
589
590 for (i = 0; i < adev->usec_timeout; i++) {
591 if (amdgpu_ring_get_rptr(ring) != rptr)
592 break;
593 udelay(1);
594 }
595
596 if (i >= adev->usec_timeout)
597 r = -ETIMEDOUT;
598
599 return r;
600}
601
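/*
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * Builds a small direct IB that loads the GPU address of @ib_msg into
 * the decoder's data0/data1 registers and issues the command, padding
 * the rest of the IB with NOPs.  The message buffer is freed against the
 * resulting fence, which is optionally returned through @fence.
 */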
602static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
603 struct amdgpu_ib *ib_msg,
604 struct dma_fence **fence)
605{
606 u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
607 struct amdgpu_device *adev = ring->adev;
608 struct dma_fence *f = NULL;
609 struct amdgpu_job *job;
610 struct amdgpu_ib *ib;
611 int i, r;
612
613 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
614 64, AMDGPU_IB_POOL_DIRECT,
615 &job);
616 if (r)
617 goto err;
618
619 ib = &job->ibs[0];
620 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
621 ib->ptr[1] = addr;
622 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
623 ib->ptr[3] = addr >> 32;
624 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
625 ib->ptr[5] = 0;
626 for (i = 6; i < 16; i += 2) {
627 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
628 ib->ptr[i+1] = 0;
629 }
630 ib->length_dw = 16;
631
632 r = amdgpu_job_submit_direct(job, ring, &f);
633 if (r)
634 goto err_free;
635
636 amdgpu_ib_free(adev, ib_msg, f);
637
638 if (fence)
639 *fence = dma_fence_get(f);
640 dma_fence_put(f);
641
642 return 0;
643
644err_free:
645 amdgpu_job_free(job);
646err:
647 amdgpu_ib_free(adev, ib_msg, f);
648 return r;
649}
650
651static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
652 struct amdgpu_ib *ib)
653{
654 struct amdgpu_device *adev = ring->adev;
655 uint32_t *msg;
656 int r, i;
657
658 memset(ib, 0, sizeof(*ib));
659 r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
660 AMDGPU_IB_POOL_DIRECT,
661 ib);
662 if (r)
663 return r;
664
665 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
666 msg[0] = cpu_to_le32(0x00000028);
667 msg[1] = cpu_to_le32(0x00000038);
668 msg[2] = cpu_to_le32(0x00000001);
669 msg[3] = cpu_to_le32(0x00000000);
670 msg[4] = cpu_to_le32(handle);
671 msg[5] = cpu_to_le32(0x00000000);
672 msg[6] = cpu_to_le32(0x00000001);
673 msg[7] = cpu_to_le32(0x00000028);
674 msg[8] = cpu_to_le32(0x00000010);
675 msg[9] = cpu_to_le32(0x00000000);
676 msg[10] = cpu_to_le32(0x00000007);
677 msg[11] = cpu_to_le32(0x00000000);
678 msg[12] = cpu_to_le32(0x00000780);
679 msg[13] = cpu_to_le32(0x00000440);
680 for (i = 14; i < 1024; ++i)
681 msg[i] = cpu_to_le32(0x0);
682
683 return 0;
684}
685
686static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
687 struct amdgpu_ib *ib)
688{
689 struct amdgpu_device *adev = ring->adev;
690 uint32_t *msg;
691 int r, i;
692
693 memset(ib, 0, sizeof(*ib));
694 r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
695 AMDGPU_IB_POOL_DIRECT,
696 ib);
697 if (r)
698 return r;
699
700 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
701 msg[0] = cpu_to_le32(0x00000028);
702 msg[1] = cpu_to_le32(0x00000018);
703 msg[2] = cpu_to_le32(0x00000000);
704 msg[3] = cpu_to_le32(0x00000002);
705 msg[4] = cpu_to_le32(handle);
706 msg[5] = cpu_to_le32(0x00000000);
707 for (i = 6; i < 1024; ++i)
708 msg[i] = cpu_to_le32(0x0);
709
710 return 0;
711}
712
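/*
 * amdgpu_vcn_dec_ring_test_ib - end-to-end decode IB test
 *
 * Sends a create-session message followed by a destroy-session message
 * for handle 1 and waits up to @timeout jiffies for the fence of the
 * final submission.
 */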
713int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
714{
715 struct dma_fence *fence = NULL;
716 struct amdgpu_ib ib;
717 long r;
718
719 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
720 if (r)
721 goto error;
722
723 r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
724 if (r)
725 goto error;
726 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
727 if (r)
728 goto error;
729
730 r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
731 if (r)
732 goto error;
733
734 r = dma_fence_wait_timeout(fence, false, timeout);
735 if (r == 0)
736 r = -ETIMEDOUT;
737 else if (r > 0)
738 r = 0;
739
740 dma_fence_put(fence);
741error:
742 return r;
743}
744
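/*
 * Unified (single) queue helpers: every IB submitted to the unified
 * queue is preceded by a header carrying engine info and a checksum
 * over the packed dwords.  amdgpu_vcn_unified_ring_ib_header() emits
 * the header and returns a pointer to the checksum slot, which is
 * filled in by amdgpu_vcn_unified_ring_ib_checksum() once the payload
 * has been written.
 */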
745static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
746 uint32_t ib_pack_in_dw, bool enc)
747{
748 uint32_t *ib_checksum;
749
750 ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
751 ib->ptr[ib->length_dw++] = 0x30000002;
752 ib_checksum = &ib->ptr[ib->length_dw++];
753 ib->ptr[ib->length_dw++] = ib_pack_in_dw;
754
755 ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
756 ib->ptr[ib->length_dw++] = 0x30000001;
757 ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
758 ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
759
760 return ib_checksum;
761}
762
763static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
764 uint32_t ib_pack_in_dw)
765{
766 uint32_t i;
767 uint32_t checksum = 0;
768
769 for (i = 0; i < ib_pack_in_dw; i++)
770 checksum += *(*ib_checksum + 2 + i);
771
772 **ib_checksum = checksum;
773}
774
775static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
776 struct amdgpu_ib *ib_msg,
777 struct dma_fence **fence)
778{
779 struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
780 unsigned int ib_size_dw = 64;
781 struct amdgpu_device *adev = ring->adev;
782 struct dma_fence *f = NULL;
783 struct amdgpu_job *job;
784 struct amdgpu_ib *ib;
785 uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
786 bool sq = amdgpu_vcn_using_unified_queue(ring);
787 uint32_t *ib_checksum;
788 uint32_t ib_pack_in_dw;
789 int i, r;
790
791 if (sq)
792 ib_size_dw += 8;
793
794 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
795 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
796 &job);
797 if (r)
798 goto err;
799
800 ib = &job->ibs[0];
801 ib->length_dw = 0;
802
803 /* single queue headers */
804 if (sq) {
805 ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
806 + 4 + 2; /* engine info + decoding ib in dw */
807 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
808 }
809
810 ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
811 ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
812 decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
813 ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
814 memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
815
816 decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
817 decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
818 decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
819
820 for (i = ib->length_dw; i < ib_size_dw; ++i)
821 ib->ptr[i] = 0x0;
822
823 if (sq)
824 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
825
826 r = amdgpu_job_submit_direct(job, ring, &f);
827 if (r)
828 goto err_free;
829
830 amdgpu_ib_free(adev, ib_msg, f);
831
832 if (fence)
833 *fence = dma_fence_get(f);
834 dma_fence_put(f);
835
836 return 0;
837
838err_free:
839 amdgpu_job_free(job);
840err:
841 amdgpu_ib_free(adev, ib_msg, f);
842 return r;
843}
844
845int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
846{
847 struct dma_fence *fence = NULL;
848 struct amdgpu_ib ib;
849 long r;
850
851 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
852 if (r)
853 goto error;
854
855 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
856 if (r)
857 goto error;
858 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
859 if (r)
860 goto error;
861
862 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
863 if (r)
864 goto error;
865
866 r = dma_fence_wait_timeout(fence, false, timeout);
867 if (r == 0)
868 r = -ETIMEDOUT;
869 else if (r > 0)
870 r = 0;
871
872 dma_fence_put(fence);
873error:
874 return r;
875}
876
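/*
 * amdgpu_vcn_enc_ring_test_ring - encode ring liveness test
 *
 * Submits a single VCN_ENC_CMD_END and waits for the read pointer to
 * advance, mirroring the software decode ring test.
 */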
877int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
878{
879 struct amdgpu_device *adev = ring->adev;
880 uint32_t rptr;
881	unsigned int i;
882 int r;
883
884 if (amdgpu_sriov_vf(adev))
885 return 0;
886
887 r = amdgpu_ring_alloc(ring, 16);
888 if (r)
889 return r;
890
891 rptr = amdgpu_ring_get_rptr(ring);
892
893 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
894 amdgpu_ring_commit(ring);
895
896 for (i = 0; i < adev->usec_timeout; i++) {
897 if (amdgpu_ring_get_rptr(ring) != rptr)
898 break;
899 udelay(1);
900 }
901
902 if (i >= adev->usec_timeout)
903 r = -ETIMEDOUT;
904
905 return r;
906}
907
908static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
909 struct amdgpu_ib *ib_msg,
910 struct dma_fence **fence)
911{
912 unsigned int ib_size_dw = 16;
913 struct amdgpu_job *job;
914 struct amdgpu_ib *ib;
915 struct dma_fence *f = NULL;
916 uint32_t *ib_checksum = NULL;
917 uint64_t addr;
918 bool sq = amdgpu_vcn_using_unified_queue(ring);
919 int i, r;
920
921 if (sq)
922 ib_size_dw += 8;
923
924 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
925 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
926 &job);
927 if (r)
928 return r;
929
930 ib = &job->ibs[0];
931 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
932
933 ib->length_dw = 0;
934
935 if (sq)
936 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
937
938 ib->ptr[ib->length_dw++] = 0x00000018;
939 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
940 ib->ptr[ib->length_dw++] = handle;
941 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
942 ib->ptr[ib->length_dw++] = addr;
943 ib->ptr[ib->length_dw++] = 0x0000000b;
944
945 ib->ptr[ib->length_dw++] = 0x00000014;
946 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
947 ib->ptr[ib->length_dw++] = 0x0000001c;
948 ib->ptr[ib->length_dw++] = 0x00000000;
949 ib->ptr[ib->length_dw++] = 0x00000000;
950
951 ib->ptr[ib->length_dw++] = 0x00000008;
952 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
953
954 for (i = ib->length_dw; i < ib_size_dw; ++i)
955 ib->ptr[i] = 0x0;
956
957 if (sq)
958 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
959
960 r = amdgpu_job_submit_direct(job, ring, &f);
961 if (r)
962 goto err;
963
964 if (fence)
965 *fence = dma_fence_get(f);
966 dma_fence_put(f);
967
968 return 0;
969
970err:
971 amdgpu_job_free(job);
972 return r;
973}
974
975static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
976 struct amdgpu_ib *ib_msg,
977 struct dma_fence **fence)
978{
979 unsigned int ib_size_dw = 16;
980 struct amdgpu_job *job;
981 struct amdgpu_ib *ib;
982 struct dma_fence *f = NULL;
983 uint32_t *ib_checksum = NULL;
984 uint64_t addr;
985 bool sq = amdgpu_vcn_using_unified_queue(ring);
986 int i, r;
987
988 if (sq)
989 ib_size_dw += 8;
990
991 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
992 ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
993 &job);
994 if (r)
995 return r;
996
997 ib = &job->ibs[0];
998 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
999
1000 ib->length_dw = 0;
1001
1002 if (sq)
1003 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
1004
1005 ib->ptr[ib->length_dw++] = 0x00000018;
1006 ib->ptr[ib->length_dw++] = 0x00000001;
1007 ib->ptr[ib->length_dw++] = handle;
1008 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1009 ib->ptr[ib->length_dw++] = addr;
1010 ib->ptr[ib->length_dw++] = 0x0000000b;
1011
1012 ib->ptr[ib->length_dw++] = 0x00000014;
1013 ib->ptr[ib->length_dw++] = 0x00000002;
1014 ib->ptr[ib->length_dw++] = 0x0000001c;
1015 ib->ptr[ib->length_dw++] = 0x00000000;
1016 ib->ptr[ib->length_dw++] = 0x00000000;
1017
1018 ib->ptr[ib->length_dw++] = 0x00000008;
1019 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
1020
1021 for (i = ib->length_dw; i < ib_size_dw; ++i)
1022 ib->ptr[i] = 0x0;
1023
1024 if (sq)
1025 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
1026
1027 r = amdgpu_job_submit_direct(job, ring, &f);
1028 if (r)
1029 goto err;
1030
1031 if (fence)
1032 *fence = dma_fence_get(f);
1033 dma_fence_put(f);
1034
1035 return 0;
1036
1037err:
1038 amdgpu_job_free(job);
1039 return r;
1040}
1041
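/*
 * amdgpu_vcn_enc_ring_test_ib - end-to-end encode IB test
 *
 * Allocates a scratch buffer for the session messages, then issues a
 * create-session and a destroy-session message for handle 1 and waits
 * for the final fence.
 */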
1042int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1043{
1044 struct amdgpu_device *adev = ring->adev;
1045 struct dma_fence *fence = NULL;
1046 struct amdgpu_ib ib;
1047 long r;
1048
1049 memset(&ib, 0, sizeof(ib));
1050 r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1051 AMDGPU_IB_POOL_DIRECT,
1052 &ib);
1053 if (r)
1054 return r;
1055
1056 r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1057 if (r)
1058 goto error;
1059
1060 r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1061 if (r)
1062 goto error;
1063
1064 r = dma_fence_wait_timeout(fence, false, timeout);
1065 if (r == 0)
1066 r = -ETIMEDOUT;
1067 else if (r > 0)
1068 r = 0;
1069
1070error:
1071 amdgpu_ib_free(adev, &ib, fence);
1072 dma_fence_put(fence);
1073
1074 return r;
1075}
1076
1077int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1078{
1079 long r;
1080
1081 r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1082 if (r)
1083 goto error;
1084
1085 r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1086
1087error:
1088 return r;
1089}
1090
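/*
 * amdgpu_vcn_get_enc_ring_prio - map an encode ring index to its ring
 * priority level; unknown indices fall back to AMDGPU_RING_PRIO_0.
 */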
1091enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1092{
1093	switch (ring) {
1094 case 0:
1095 return AMDGPU_RING_PRIO_0;
1096 case 1:
1097 return AMDGPU_RING_PRIO_1;
1098 case 2:
1099 return AMDGPU_RING_PRIO_2;
1100 default:
1101 return AMDGPU_RING_PRIO_0;
1102 }
1103}
1104
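/*
 * amdgpu_vcn_setup_ucode - register VCN firmware with the PSP loader
 *
 * When firmware is loaded through PSP, one ucode entry is registered
 * per non-harvested VCN instance (at most two) and the total firmware
 * size is grown by the page-aligned ucode size from the common header.
 */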
1105void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1106{
1107 int i;
1108 unsigned int idx;
1109
1110 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1111 const struct common_firmware_header *hdr;
1112 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1113
1114 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1115 if (adev->vcn.harvest_config & (1 << i))
1116 continue;
1117			/* currently only up to 2 FW instances are supported */
1118			if (i >= 2) {
1119				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1120 break;
1121 }
1122 idx = AMDGPU_UCODE_ID_VCN + i;
1123 adev->firmware.ucode[idx].ucode_id = idx;
1124 adev->firmware.ucode[idx].fw = adev->vcn.fw;
1125 adev->firmware.fw_size +=
1126 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1127 }
1128 dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1129 }
1130}
1131
1132/*
1133 * debugfs interface for reading the vcn firmware log buffer.
1134 */
1135#if defined(CONFIG_DEBUG_FS)
1136static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1137 size_t size, loff_t *pos)
1138{
1139 struct amdgpu_vcn_inst *vcn;
1140 void *log_buf;
1141 volatile struct amdgpu_vcn_fwlog *plog;
1142 unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1143 unsigned int read_num[2] = {0};
1144
1145 vcn = file_inode(f)->i_private;
1146 if (!vcn)
1147 return -ENODEV;
1148
1149 if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1150 return -EFAULT;
1151
1152 log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1153
1154 plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1155 read_pos = plog->rptr;
1156 write_pos = plog->wptr;
1157
1158 if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1159 return -EFAULT;
1160
1161 if (!size || (read_pos == write_pos))
1162 return 0;
1163
1164 if (write_pos > read_pos) {
1165 available = write_pos - read_pos;
1166 read_num[0] = min(size, (size_t)available);
1167 } else {
1168 read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1169 available = read_num[0] + write_pos - plog->header_size;
1170 if (size > available)
1171 read_num[1] = write_pos - plog->header_size;
1172 else if (size > read_num[0])
1173 read_num[1] = size - read_num[0];
1174 else
1175 read_num[0] = size;
1176 }
1177
1178 for (i = 0; i < 2; i++) {
1179 if (read_num[i]) {
1180 if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1181 read_pos = plog->header_size;
1182 if (read_num[i] == copy_to_user((buf + read_bytes),
1183 (log_buf + read_pos), read_num[i]))
1184 return -EFAULT;
1185
1186 read_bytes += read_num[i];
1187 read_pos += read_num[i];
1188 }
1189 }
1190
1191 plog->rptr = read_pos;
1192 *pos += read_bytes;
1193 return read_bytes;
1194}
1195
1196static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1197 .owner = THIS_MODULE,
1198 .read = amdgpu_debugfs_vcn_fwlog_read,
1199 .llseek = default_llseek
1200};
1201#endif
1202
1203void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1204 struct amdgpu_vcn_inst *vcn)
1205{
1206#if defined(CONFIG_DEBUG_FS)
1207 struct drm_minor *minor = adev_to_drm(adev)->primary;
1208 struct dentry *root = minor->debugfs_root;
1209 char name[32];
1210
1211	snprintf(name, sizeof(name), "amdgpu_vcn_%d_fwlog", i);
1212 debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
1213 &amdgpu_debugfs_vcnfwlog_fops,
1214 AMDGPU_VCNFW_LOG_SIZE);
1215#endif
1216}
1217
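/*
 * amdgpu_vcn_fwlog_init - enable firmware logging for a VCN instance
 *
 * The log ring buffer lives directly after the fw_shared region.  This
 * advertises its GPU address and size to the firmware through the
 * fw_shared logging structure and initializes the ring header so that
 * reads start just past the header.
 */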
1218void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1219{
1220#if defined(CONFIG_DEBUG_FS)
1221 volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1222 void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1223 uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1224 volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1225 volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1226 + vcn->fw_shared.log_offset;
1227 *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1228 fw_log->is_enabled = 1;
1229 fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1230 fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1231 fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1232
1233 log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1234 log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1235 log_buf->rptr = log_buf->header_size;
1236 log_buf->wptr = log_buf->header_size;
1237 log_buf->wrapped = 0;
1238#endif
1239}
1240
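/*
 * amdgpu_vcn_process_poison_irq - forward a VCN poison interrupt to the
 * RAS core for dispatch; a no-op when no RAS interface is registered.
 */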
1241int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1242 struct amdgpu_irq_src *source,
1243 struct amdgpu_iv_entry *entry)
1244{
1245 struct ras_common_if *ras_if = adev->vcn.ras_if;
1246 struct ras_dispatch_if ih_data = {
1247 .entry = entry,
1248 };
1249
1250 if (!ras_if)
1251 return 0;
1252
1253 ih_data.head = *ras_if;
1254 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1255
1256 return 0;
1257}
1258
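/*
 * amdgpu_vcn_set_ras_funcs - register the VCN RAS block and fill in its
 * common identification fields (name, block, error type), falling back
 * to the generic amdgpu_ras_block_late_init when the block does not
 * provide its own late-init hook.
 */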
1259void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
1260{
1261 if (!adev->vcn.ras)
1262 return;
1263
1264 amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
1265
1266 strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
1267 adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1268 adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1269 adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
1270
1271	/* If no dedicated ras_late_init function is defined, use the default ras_late_init */
1272 if (!adev->vcn.ras->ras_block.ras_late_init)
1273 adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
1274}