Loading...
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26#include <linux/firmware.h>
27#include <linux/dma-mapping.h>
28
29#include "amdgpu.h"
30#include "amdgpu_psp.h"
31#include "amdgpu_ucode.h"
32#include "soc15_common.h"
33#include "psp_v3_1.h"
34#include "psp_v10_0.h"
35#include "psp_v11_0.h"
36#include "psp_v12_0.h"
37
38#include "amdgpu_ras.h"
39
40static int psp_sysfs_init(struct amdgpu_device *adev);
41static void psp_sysfs_fini(struct amdgpu_device *adev);
42
43static int psp_load_smu_fw(struct psp_context *psp);
44
45/*
46 * Due to DF Cstate management centralized to PMFW, the firmware
47 * loading sequence will be updated as below:
48 * - Load KDB
49 * - Load SYS_DRV
50 * - Load tOS
51 * - Load PMFW
52 * - Setup TMR
53 * - Load other non-psp fw
54 * - Load ASD
55 * - Load XGMI/RAS/HDCP/DTM TA if any
56 *
57 * This new sequence is required for
58 * - Arcturus
59 * - Navi12 and onwards
60 */
61static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
62{
63 struct amdgpu_device *adev = psp->adev;
64
65 psp->pmfw_centralized_cstate_management = false;
66
67 if (amdgpu_sriov_vf(adev))
68 return;
69
70 if (adev->flags & AMD_IS_APU)
71 return;
72
73 if ((adev->asic_type == CHIP_ARCTURUS) ||
74 (adev->asic_type >= CHIP_NAVI12))
75 psp->pmfw_centralized_cstate_management = true;
76}
77
78static int psp_early_init(void *handle)
79{
80 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
81 struct psp_context *psp = &adev->psp;
82
83 switch (adev->asic_type) {
84 case CHIP_VEGA10:
85 case CHIP_VEGA12:
86 psp_v3_1_set_psp_funcs(psp);
87 psp->autoload_supported = false;
88 break;
89 case CHIP_RAVEN:
90 psp_v10_0_set_psp_funcs(psp);
91 psp->autoload_supported = false;
92 break;
93 case CHIP_VEGA20:
94 case CHIP_ARCTURUS:
95 psp_v11_0_set_psp_funcs(psp);
96 psp->autoload_supported = false;
97 break;
98 case CHIP_NAVI10:
99 case CHIP_NAVI14:
100 case CHIP_NAVI12:
101 case CHIP_SIENNA_CICHLID:
102 case CHIP_NAVY_FLOUNDER:
103 psp_v11_0_set_psp_funcs(psp);
104 psp->autoload_supported = true;
105 break;
106 case CHIP_RENOIR:
107 psp_v12_0_set_psp_funcs(psp);
108 break;
109 default:
110 return -EINVAL;
111 }
112
113 psp->adev = adev;
114
115 psp_check_pmfw_centralized_cstate_management(psp);
116
117 return 0;
118}
119
120static void psp_memory_training_fini(struct psp_context *psp)
121{
122 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
123
124 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
125 kfree(ctx->sys_cache);
126 ctx->sys_cache = NULL;
127}
128
129static int psp_memory_training_init(struct psp_context *psp)
130{
131 int ret;
132 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
133
134 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
135 DRM_DEBUG("memory training is not supported!\n");
136 return 0;
137 }
138
139 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
140 if (ctx->sys_cache == NULL) {
141 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
142 ret = -ENOMEM;
143 goto Err_out;
144 }
145
146 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
147 ctx->train_data_size,
148 ctx->p2c_train_data_offset,
149 ctx->c2p_train_data_offset);
150 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
151 return 0;
152
153Err_out:
154 psp_memory_training_fini(psp);
155 return ret;
156}
157
158static int psp_sw_init(void *handle)
159{
160 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
161 struct psp_context *psp = &adev->psp;
162 int ret;
163
164 ret = psp_init_microcode(psp);
165 if (ret) {
166 DRM_ERROR("Failed to load psp firmware!\n");
167 return ret;
168 }
169
170 ret = psp_memory_training_init(psp);
171 if (ret) {
172 DRM_ERROR("Failed to initialize memory training!\n");
173 return ret;
174 }
175 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
176 if (ret) {
177 DRM_ERROR("Failed to process memory training!\n");
178 return ret;
179 }
180
181 if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
182 ret= psp_sysfs_init(adev);
183 if (ret) {
184 return ret;
185 }
186 }
187
188 return 0;
189}
190
191static int psp_sw_fini(void *handle)
192{
193 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
194
195 psp_memory_training_fini(&adev->psp);
196 if (adev->psp.sos_fw) {
197 release_firmware(adev->psp.sos_fw);
198 adev->psp.sos_fw = NULL;
199 }
200 if (adev->psp.asd_fw) {
201 release_firmware(adev->psp.asd_fw);
202 adev->psp.asd_fw = NULL;
203 }
204 if (adev->psp.ta_fw) {
205 release_firmware(adev->psp.ta_fw);
206 adev->psp.ta_fw = NULL;
207 }
208
209 if (adev->asic_type == CHIP_NAVI10)
210 psp_sysfs_fini(adev);
211
212 return 0;
213}
214
215int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
216 uint32_t reg_val, uint32_t mask, bool check_changed)
217{
218 uint32_t val;
219 int i;
220 struct amdgpu_device *adev = psp->adev;
221
222 for (i = 0; i < adev->usec_timeout; i++) {
223 val = RREG32(reg_index);
224 if (check_changed) {
225 if (val != reg_val)
226 return 0;
227 } else {
228 if ((val & mask) == reg_val)
229 return 0;
230 }
231 udelay(1);
232 }
233
234 return -ETIME;
235}
236
237static int
238psp_cmd_submit_buf(struct psp_context *psp,
239 struct amdgpu_firmware_info *ucode,
240 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
241{
242 int ret;
243 int index;
244 int timeout = 2000;
245 bool ras_intr = false;
246 bool skip_unsupport = false;
247
248 mutex_lock(&psp->mutex);
249
250 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
251
252 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
253
254 index = atomic_inc_return(&psp->fence_value);
255 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
256 if (ret) {
257 atomic_dec(&psp->fence_value);
258 mutex_unlock(&psp->mutex);
259 return ret;
260 }
261
262 amdgpu_asic_invalidate_hdp(psp->adev, NULL);
263 while (*((unsigned int *)psp->fence_buf) != index) {
264 if (--timeout == 0)
265 break;
266 /*
267 * Shouldn't wait for timeout when err_event_athub occurs,
268 * because gpu reset thread triggered and lock resource should
269 * be released for psp resume sequence.
270 */
271 ras_intr = amdgpu_ras_intr_triggered();
272 if (ras_intr)
273 break;
274 msleep(1);
275 amdgpu_asic_invalidate_hdp(psp->adev, NULL);
276 }
277
278 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
279 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
280 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
281
282 /* In some cases, psp response status is not 0 even there is no
283 * problem while the command is submitted. Some version of PSP FW
284 * doesn't write 0 to that field.
285 * So here we would like to only print a warning instead of an error
286 * during psp initialization to avoid breaking hw_init and it doesn't
287 * return -EINVAL.
288 */
289 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
290 if (ucode)
291 DRM_WARN("failed to load ucode id (%d) ",
292 ucode->ucode_id);
293 DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
294 psp->cmd_buf_mem->cmd_id,
295 psp->cmd_buf_mem->resp.status);
296 if (!timeout) {
297 mutex_unlock(&psp->mutex);
298 return -EINVAL;
299 }
300 }
301
302 /* get xGMI session id from response buffer */
303 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
304
305 if (ucode) {
306 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
307 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
308 }
309 mutex_unlock(&psp->mutex);
310
311 return ret;
312}
313
314static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
315 struct psp_gfx_cmd_resp *cmd,
316 uint64_t tmr_mc, uint32_t size)
317{
318 if (amdgpu_sriov_vf(psp->adev))
319 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
320 else
321 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
322 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
323 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
324 cmd->cmd.cmd_setup_tmr.buf_size = size;
325}
326
327static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
328 uint64_t pri_buf_mc, uint32_t size)
329{
330 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
331 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
332 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
333 cmd->cmd.cmd_load_toc.toc_size = size;
334}
335
336/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
337static int psp_load_toc(struct psp_context *psp,
338 uint32_t *tmr_size)
339{
340 int ret;
341 struct psp_gfx_cmd_resp *cmd;
342
343 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
344 if (!cmd)
345 return -ENOMEM;
346 /* Copy toc to psp firmware private buffer */
347 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
348 memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
349
350 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
351
352 ret = psp_cmd_submit_buf(psp, NULL, cmd,
353 psp->fence_buf_mc_addr);
354 if (!ret)
355 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
356 kfree(cmd);
357 return ret;
358}
359
360/* Set up Trusted Memory Region */
361static int psp_tmr_init(struct psp_context *psp)
362{
363 int ret;
364 int tmr_size;
365 void *tmr_buf;
366 void **pptr;
367
368 /*
369 * According to HW engineer, they prefer the TMR address be "naturally
370 * aligned" , e.g. the start address be an integer divide of TMR size.
371 *
372 * Note: this memory need be reserved till the driver
373 * uninitializes.
374 */
375 tmr_size = PSP_TMR_SIZE;
376
377 /* For ASICs support RLC autoload, psp will parse the toc
378 * and calculate the total size of TMR needed */
379 if (!amdgpu_sriov_vf(psp->adev) &&
380 psp->toc_start_addr &&
381 psp->toc_bin_size &&
382 psp->fw_pri_buf) {
383 ret = psp_load_toc(psp, &tmr_size);
384 if (ret) {
385 DRM_ERROR("Failed to load toc\n");
386 return ret;
387 }
388 }
389
390 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
391 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
392 AMDGPU_GEM_DOMAIN_VRAM,
393 &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
394
395 return ret;
396}
397
398static int psp_clear_vf_fw(struct psp_context *psp)
399{
400 int ret;
401 struct psp_gfx_cmd_resp *cmd;
402
403 if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12)
404 return 0;
405
406 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
407 if (!cmd)
408 return -ENOMEM;
409
410 cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW;
411
412 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
413 kfree(cmd);
414
415 return ret;
416}
417
418static bool psp_skip_tmr(struct psp_context *psp)
419{
420 switch (psp->adev->asic_type) {
421 case CHIP_NAVI12:
422 case CHIP_SIENNA_CICHLID:
423 return true;
424 default:
425 return false;
426 }
427}
428
429static int psp_tmr_load(struct psp_context *psp)
430{
431 int ret;
432 struct psp_gfx_cmd_resp *cmd;
433
434 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
435 * Already set up by host driver.
436 */
437 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
438 return 0;
439
440 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
441 if (!cmd)
442 return -ENOMEM;
443
444 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
445 amdgpu_bo_size(psp->tmr_bo));
446 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
447 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
448
449 ret = psp_cmd_submit_buf(psp, NULL, cmd,
450 psp->fence_buf_mc_addr);
451
452 kfree(cmd);
453
454 return ret;
455}
456
457static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
458 struct psp_gfx_cmd_resp *cmd)
459{
460 if (amdgpu_sriov_vf(psp->adev))
461 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
462 else
463 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
464}
465
466static int psp_tmr_unload(struct psp_context *psp)
467{
468 int ret;
469 struct psp_gfx_cmd_resp *cmd;
470
471 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
472 if (!cmd)
473 return -ENOMEM;
474
475 psp_prep_tmr_unload_cmd_buf(psp, cmd);
476 DRM_INFO("free PSP TMR buffer\n");
477
478 ret = psp_cmd_submit_buf(psp, NULL, cmd,
479 psp->fence_buf_mc_addr);
480
481 kfree(cmd);
482
483 return ret;
484}
485
486static int psp_tmr_terminate(struct psp_context *psp)
487{
488 int ret;
489 void *tmr_buf;
490 void **pptr;
491
492 ret = psp_tmr_unload(psp);
493 if (ret)
494 return ret;
495
496 /* free TMR memory buffer */
497 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
498 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
499
500 return 0;
501}
502
503static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
504 uint64_t asd_mc, uint32_t size)
505{
506 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
507 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
508 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
509 cmd->cmd.cmd_load_ta.app_len = size;
510
511 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
512 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
513 cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
514}
515
516static int psp_asd_load(struct psp_context *psp)
517{
518 int ret;
519 struct psp_gfx_cmd_resp *cmd;
520
521 /* If PSP version doesn't match ASD version, asd loading will be failed.
522 * add workaround to bypass it for sriov now.
523 * TODO: add version check to make it common
524 */
525 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
526 return 0;
527
528 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
529 if (!cmd)
530 return -ENOMEM;
531
532 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
533 memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
534
535 psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
536 psp->asd_ucode_size);
537
538 ret = psp_cmd_submit_buf(psp, NULL, cmd,
539 psp->fence_buf_mc_addr);
540 if (!ret) {
541 psp->asd_context.asd_initialized = true;
542 psp->asd_context.session_id = cmd->resp.session_id;
543 }
544
545 kfree(cmd);
546
547 return ret;
548}
549
550static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
551 uint32_t session_id)
552{
553 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
554 cmd->cmd.cmd_unload_ta.session_id = session_id;
555}
556
557static int psp_asd_unload(struct psp_context *psp)
558{
559 int ret;
560 struct psp_gfx_cmd_resp *cmd;
561
562 if (amdgpu_sriov_vf(psp->adev))
563 return 0;
564
565 if (!psp->asd_context.asd_initialized)
566 return 0;
567
568 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
569 if (!cmd)
570 return -ENOMEM;
571
572 psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
573
574 ret = psp_cmd_submit_buf(psp, NULL, cmd,
575 psp->fence_buf_mc_addr);
576 if (!ret)
577 psp->asd_context.asd_initialized = false;
578
579 kfree(cmd);
580
581 return ret;
582}
583
584static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
585 uint32_t id, uint32_t value)
586{
587 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
588 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
589 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
590}
591
592int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
593 uint32_t value)
594{
595 struct psp_gfx_cmd_resp *cmd = NULL;
596 int ret = 0;
597
598 if (reg >= PSP_REG_LAST)
599 return -EINVAL;
600
601 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
602 if (!cmd)
603 return -ENOMEM;
604
605 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
606 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
607
608 kfree(cmd);
609 return ret;
610}
611
612static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
613 uint64_t ta_bin_mc,
614 uint32_t ta_bin_size,
615 uint64_t ta_shared_mc,
616 uint32_t ta_shared_size)
617{
618 cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
619 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
620 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
621 cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
622
623 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
624 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
625 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
626}
627
628static int psp_xgmi_init_shared_buf(struct psp_context *psp)
629{
630 int ret;
631
632 /*
633 * Allocate 16k memory aligned to 4k from Frame Buffer (local
634 * physical) for xgmi ta <-> Driver
635 */
636 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
637 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
638 &psp->xgmi_context.xgmi_shared_bo,
639 &psp->xgmi_context.xgmi_shared_mc_addr,
640 &psp->xgmi_context.xgmi_shared_buf);
641
642 return ret;
643}
644
645static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
646 uint32_t ta_cmd_id,
647 uint32_t session_id)
648{
649 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
650 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
651 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
652}
653
654static int psp_ta_invoke(struct psp_context *psp,
655 uint32_t ta_cmd_id,
656 uint32_t session_id)
657{
658 int ret;
659 struct psp_gfx_cmd_resp *cmd;
660
661 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
662 if (!cmd)
663 return -ENOMEM;
664
665 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
666
667 ret = psp_cmd_submit_buf(psp, NULL, cmd,
668 psp->fence_buf_mc_addr);
669
670 kfree(cmd);
671
672 return ret;
673}
674
675static int psp_xgmi_load(struct psp_context *psp)
676{
677 int ret;
678 struct psp_gfx_cmd_resp *cmd;
679
680 /*
681 * TODO: bypass the loading in sriov for now
682 */
683
684 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
685 if (!cmd)
686 return -ENOMEM;
687
688 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
689 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
690
691 psp_prep_ta_load_cmd_buf(cmd,
692 psp->fw_pri_mc_addr,
693 psp->ta_xgmi_ucode_size,
694 psp->xgmi_context.xgmi_shared_mc_addr,
695 PSP_XGMI_SHARED_MEM_SIZE);
696
697 ret = psp_cmd_submit_buf(psp, NULL, cmd,
698 psp->fence_buf_mc_addr);
699
700 if (!ret) {
701 psp->xgmi_context.initialized = 1;
702 psp->xgmi_context.session_id = cmd->resp.session_id;
703 }
704
705 kfree(cmd);
706
707 return ret;
708}
709
710static int psp_xgmi_unload(struct psp_context *psp)
711{
712 int ret;
713 struct psp_gfx_cmd_resp *cmd;
714 struct amdgpu_device *adev = psp->adev;
715
716 /* XGMI TA unload currently is not supported on Arcturus */
717 if (adev->asic_type == CHIP_ARCTURUS)
718 return 0;
719
720 /*
721 * TODO: bypass the unloading in sriov for now
722 */
723
724 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
725 if (!cmd)
726 return -ENOMEM;
727
728 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
729
730 ret = psp_cmd_submit_buf(psp, NULL, cmd,
731 psp->fence_buf_mc_addr);
732
733 kfree(cmd);
734
735 return ret;
736}
737
738int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
739{
740 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
741}
742
743int psp_xgmi_terminate(struct psp_context *psp)
744{
745 int ret;
746
747 if (!psp->xgmi_context.initialized)
748 return 0;
749
750 ret = psp_xgmi_unload(psp);
751 if (ret)
752 return ret;
753
754 psp->xgmi_context.initialized = 0;
755
756 /* free xgmi shared memory */
757 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
758 &psp->xgmi_context.xgmi_shared_mc_addr,
759 &psp->xgmi_context.xgmi_shared_buf);
760
761 return 0;
762}
763
764int psp_xgmi_initialize(struct psp_context *psp)
765{
766 struct ta_xgmi_shared_memory *xgmi_cmd;
767 int ret;
768
769 if (!psp->adev->psp.ta_fw ||
770 !psp->adev->psp.ta_xgmi_ucode_size ||
771 !psp->adev->psp.ta_xgmi_start_addr)
772 return -ENOENT;
773
774 if (!psp->xgmi_context.initialized) {
775 ret = psp_xgmi_init_shared_buf(psp);
776 if (ret)
777 return ret;
778 }
779
780 /* Load XGMI TA */
781 ret = psp_xgmi_load(psp);
782 if (ret)
783 return ret;
784
785 /* Initialize XGMI session */
786 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
787 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
788 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
789
790 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
791
792 return ret;
793}
794
795int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
796{
797 struct ta_xgmi_shared_memory *xgmi_cmd;
798 int ret;
799
800 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
801 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
802
803 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
804
805 /* Invoke xgmi ta to get hive id */
806 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
807 if (ret)
808 return ret;
809
810 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
811
812 return 0;
813}
814
815int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
816{
817 struct ta_xgmi_shared_memory *xgmi_cmd;
818 int ret;
819
820 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
821 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
822
823 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
824
825 /* Invoke xgmi ta to get the node id */
826 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
827 if (ret)
828 return ret;
829
830 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
831
832 return 0;
833}
834
835int psp_xgmi_get_topology_info(struct psp_context *psp,
836 int number_devices,
837 struct psp_xgmi_topology_info *topology)
838{
839 struct ta_xgmi_shared_memory *xgmi_cmd;
840 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
841 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
842 int i;
843 int ret;
844
845 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
846 return -EINVAL;
847
848 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
849 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
850
851 /* Fill in the shared memory with topology information as input */
852 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
853 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
854 topology_info_input->num_nodes = number_devices;
855
856 for (i = 0; i < topology_info_input->num_nodes; i++) {
857 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
858 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
859 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
860 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
861 }
862
863 /* Invoke xgmi ta to get the topology information */
864 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
865 if (ret)
866 return ret;
867
868 /* Read the output topology information from the shared memory */
869 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
870 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
871 for (i = 0; i < topology->num_nodes; i++) {
872 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
873 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
874 topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
875 topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
876 }
877
878 return 0;
879}
880
881int psp_xgmi_set_topology_info(struct psp_context *psp,
882 int number_devices,
883 struct psp_xgmi_topology_info *topology)
884{
885 struct ta_xgmi_shared_memory *xgmi_cmd;
886 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
887 int i;
888
889 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
890 return -EINVAL;
891
892 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
893 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
894
895 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
896 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
897 topology_info_input->num_nodes = number_devices;
898
899 for (i = 0; i < topology_info_input->num_nodes; i++) {
900 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
901 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
902 topology_info_input->nodes[i].is_sharing_enabled = 1;
903 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
904 }
905
906 /* Invoke xgmi ta to set topology information */
907 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
908}
909
910// ras begin
911static int psp_ras_init_shared_buf(struct psp_context *psp)
912{
913 int ret;
914
915 /*
916 * Allocate 16k memory aligned to 4k from Frame Buffer (local
917 * physical) for ras ta <-> Driver
918 */
919 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
920 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
921 &psp->ras.ras_shared_bo,
922 &psp->ras.ras_shared_mc_addr,
923 &psp->ras.ras_shared_buf);
924
925 return ret;
926}
927
928static int psp_ras_load(struct psp_context *psp)
929{
930 int ret;
931 struct psp_gfx_cmd_resp *cmd;
932
933 /*
934 * TODO: bypass the loading in sriov for now
935 */
936 if (amdgpu_sriov_vf(psp->adev))
937 return 0;
938
939 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
940 if (!cmd)
941 return -ENOMEM;
942
943 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
944 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
945
946 psp_prep_ta_load_cmd_buf(cmd,
947 psp->fw_pri_mc_addr,
948 psp->ta_ras_ucode_size,
949 psp->ras.ras_shared_mc_addr,
950 PSP_RAS_SHARED_MEM_SIZE);
951
952 ret = psp_cmd_submit_buf(psp, NULL, cmd,
953 psp->fence_buf_mc_addr);
954
955 if (!ret) {
956 psp->ras.ras_initialized = true;
957 psp->ras.session_id = cmd->resp.session_id;
958 }
959
960 kfree(cmd);
961
962 return ret;
963}
964
965static int psp_ras_unload(struct psp_context *psp)
966{
967 int ret;
968 struct psp_gfx_cmd_resp *cmd;
969
970 /*
971 * TODO: bypass the unloading in sriov for now
972 */
973 if (amdgpu_sriov_vf(psp->adev))
974 return 0;
975
976 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
977 if (!cmd)
978 return -ENOMEM;
979
980 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
981
982 ret = psp_cmd_submit_buf(psp, NULL, cmd,
983 psp->fence_buf_mc_addr);
984
985 kfree(cmd);
986
987 return ret;
988}
989
990int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
991{
992 struct ta_ras_shared_memory *ras_cmd;
993 int ret;
994
995 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
996
997 /*
998 * TODO: bypass the loading in sriov for now
999 */
1000 if (amdgpu_sriov_vf(psp->adev))
1001 return 0;
1002
1003 ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
1004
1005 if (amdgpu_ras_intr_triggered())
1006 return ret;
1007
1008 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
1009 {
1010 DRM_WARN("RAS: Unsupported Interface");
1011 return -EINVAL;
1012 }
1013
1014 if (!ret) {
1015 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1016 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1017
1018 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1019 }
1020 else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1021 dev_warn(psp->adev->dev,
1022 "RAS internal register access blocked\n");
1023 }
1024
1025 return ret;
1026}
1027
1028int psp_ras_enable_features(struct psp_context *psp,
1029 union ta_ras_cmd_input *info, bool enable)
1030{
1031 struct ta_ras_shared_memory *ras_cmd;
1032 int ret;
1033
1034 if (!psp->ras.ras_initialized)
1035 return -EINVAL;
1036
1037 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1038 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1039
1040 if (enable)
1041 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1042 else
1043 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1044
1045 ras_cmd->ras_in_message = *info;
1046
1047 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1048 if (ret)
1049 return -EINVAL;
1050
1051 return ras_cmd->ras_status;
1052}
1053
1054static int psp_ras_terminate(struct psp_context *psp)
1055{
1056 int ret;
1057
1058 /*
1059 * TODO: bypass the terminate in sriov for now
1060 */
1061 if (amdgpu_sriov_vf(psp->adev))
1062 return 0;
1063
1064 if (!psp->ras.ras_initialized)
1065 return 0;
1066
1067 ret = psp_ras_unload(psp);
1068 if (ret)
1069 return ret;
1070
1071 psp->ras.ras_initialized = false;
1072
1073 /* free ras shared memory */
1074 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
1075 &psp->ras.ras_shared_mc_addr,
1076 &psp->ras.ras_shared_buf);
1077
1078 return 0;
1079}
1080
1081static int psp_ras_initialize(struct psp_context *psp)
1082{
1083 int ret;
1084
1085 /*
1086 * TODO: bypass the initialize in sriov for now
1087 */
1088 if (amdgpu_sriov_vf(psp->adev))
1089 return 0;
1090
1091 if (!psp->adev->psp.ta_ras_ucode_size ||
1092 !psp->adev->psp.ta_ras_start_addr) {
1093 dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
1094 return 0;
1095 }
1096
1097 if (!psp->ras.ras_initialized) {
1098 ret = psp_ras_init_shared_buf(psp);
1099 if (ret)
1100 return ret;
1101 }
1102
1103 ret = psp_ras_load(psp);
1104 if (ret)
1105 return ret;
1106
1107 return 0;
1108}
1109
1110int psp_ras_trigger_error(struct psp_context *psp,
1111 struct ta_ras_trigger_error_input *info)
1112{
1113 struct ta_ras_shared_memory *ras_cmd;
1114 int ret;
1115
1116 if (!psp->ras.ras_initialized)
1117 return -EINVAL;
1118
1119 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1120 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1121
1122 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1123 ras_cmd->ras_in_message.trigger_error = *info;
1124
1125 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1126 if (ret)
1127 return -EINVAL;
1128
1129 /* If err_event_athub occurs error inject was successful, however
1130 return status from TA is no long reliable */
1131 if (amdgpu_ras_intr_triggered())
1132 return 0;
1133
1134 return ras_cmd->ras_status;
1135}
1136// ras end
1137
1138// HDCP start
1139static int psp_hdcp_init_shared_buf(struct psp_context *psp)
1140{
1141 int ret;
1142
1143 /*
1144 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1145 * physical) for hdcp ta <-> Driver
1146 */
1147 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
1148 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1149 &psp->hdcp_context.hdcp_shared_bo,
1150 &psp->hdcp_context.hdcp_shared_mc_addr,
1151 &psp->hdcp_context.hdcp_shared_buf);
1152
1153 return ret;
1154}
1155
1156static int psp_hdcp_load(struct psp_context *psp)
1157{
1158 int ret;
1159 struct psp_gfx_cmd_resp *cmd;
1160
1161 /*
1162 * TODO: bypass the loading in sriov for now
1163 */
1164 if (amdgpu_sriov_vf(psp->adev))
1165 return 0;
1166
1167 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1168 if (!cmd)
1169 return -ENOMEM;
1170
1171 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1172 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
1173 psp->ta_hdcp_ucode_size);
1174
1175 psp_prep_ta_load_cmd_buf(cmd,
1176 psp->fw_pri_mc_addr,
1177 psp->ta_hdcp_ucode_size,
1178 psp->hdcp_context.hdcp_shared_mc_addr,
1179 PSP_HDCP_SHARED_MEM_SIZE);
1180
1181 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1182
1183 if (!ret) {
1184 psp->hdcp_context.hdcp_initialized = true;
1185 psp->hdcp_context.session_id = cmd->resp.session_id;
1186 mutex_init(&psp->hdcp_context.mutex);
1187 }
1188
1189 kfree(cmd);
1190
1191 return ret;
1192}
1193static int psp_hdcp_initialize(struct psp_context *psp)
1194{
1195 int ret;
1196
1197 /*
1198 * TODO: bypass the initialize in sriov for now
1199 */
1200 if (amdgpu_sriov_vf(psp->adev))
1201 return 0;
1202
1203 if (!psp->adev->psp.ta_hdcp_ucode_size ||
1204 !psp->adev->psp.ta_hdcp_start_addr) {
1205 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1206 return 0;
1207 }
1208
1209 if (!psp->hdcp_context.hdcp_initialized) {
1210 ret = psp_hdcp_init_shared_buf(psp);
1211 if (ret)
1212 return ret;
1213 }
1214
1215 ret = psp_hdcp_load(psp);
1216 if (ret)
1217 return ret;
1218
1219 return 0;
1220}
1221
1222static int psp_hdcp_unload(struct psp_context *psp)
1223{
1224 int ret;
1225 struct psp_gfx_cmd_resp *cmd;
1226
1227 /*
1228 * TODO: bypass the unloading in sriov for now
1229 */
1230 if (amdgpu_sriov_vf(psp->adev))
1231 return 0;
1232
1233 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1234 if (!cmd)
1235 return -ENOMEM;
1236
1237 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
1238
1239 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1240
1241 kfree(cmd);
1242
1243 return ret;
1244}
1245
1246int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1247{
1248 /*
1249 * TODO: bypass the loading in sriov for now
1250 */
1251 if (amdgpu_sriov_vf(psp->adev))
1252 return 0;
1253
1254 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
1255}
1256
1257static int psp_hdcp_terminate(struct psp_context *psp)
1258{
1259 int ret;
1260
1261 /*
1262 * TODO: bypass the terminate in sriov for now
1263 */
1264 if (amdgpu_sriov_vf(psp->adev))
1265 return 0;
1266
1267 if (!psp->hdcp_context.hdcp_initialized)
1268 return 0;
1269
1270 ret = psp_hdcp_unload(psp);
1271 if (ret)
1272 return ret;
1273
1274 psp->hdcp_context.hdcp_initialized = false;
1275
1276 /* free hdcp shared memory */
1277 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
1278 &psp->hdcp_context.hdcp_shared_mc_addr,
1279 &psp->hdcp_context.hdcp_shared_buf);
1280
1281 return 0;
1282}
1283// HDCP end
1284
1285// DTM start
1286static int psp_dtm_init_shared_buf(struct psp_context *psp)
1287{
1288 int ret;
1289
1290 /*
1291 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1292 * physical) for dtm ta <-> Driver
1293 */
1294 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
1295 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1296 &psp->dtm_context.dtm_shared_bo,
1297 &psp->dtm_context.dtm_shared_mc_addr,
1298 &psp->dtm_context.dtm_shared_buf);
1299
1300 return ret;
1301}
1302
1303static int psp_dtm_load(struct psp_context *psp)
1304{
1305 int ret;
1306 struct psp_gfx_cmd_resp *cmd;
1307
1308 /*
1309 * TODO: bypass the loading in sriov for now
1310 */
1311 if (amdgpu_sriov_vf(psp->adev))
1312 return 0;
1313
1314 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1315 if (!cmd)
1316 return -ENOMEM;
1317
1318 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1319 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
1320
1321 psp_prep_ta_load_cmd_buf(cmd,
1322 psp->fw_pri_mc_addr,
1323 psp->ta_dtm_ucode_size,
1324 psp->dtm_context.dtm_shared_mc_addr,
1325 PSP_DTM_SHARED_MEM_SIZE);
1326
1327 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1328
1329 if (!ret) {
1330 psp->dtm_context.dtm_initialized = true;
1331 psp->dtm_context.session_id = cmd->resp.session_id;
1332 mutex_init(&psp->dtm_context.mutex);
1333 }
1334
1335 kfree(cmd);
1336
1337 return ret;
1338}
1339
1340static int psp_dtm_initialize(struct psp_context *psp)
1341{
1342 int ret;
1343
1344 /*
1345 * TODO: bypass the initialize in sriov for now
1346 */
1347 if (amdgpu_sriov_vf(psp->adev))
1348 return 0;
1349
1350 if (!psp->adev->psp.ta_dtm_ucode_size ||
1351 !psp->adev->psp.ta_dtm_start_addr) {
1352 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1353 return 0;
1354 }
1355
1356 if (!psp->dtm_context.dtm_initialized) {
1357 ret = psp_dtm_init_shared_buf(psp);
1358 if (ret)
1359 return ret;
1360 }
1361
1362 ret = psp_dtm_load(psp);
1363 if (ret)
1364 return ret;
1365
1366 return 0;
1367}
1368
1369static int psp_dtm_unload(struct psp_context *psp)
1370{
1371 int ret;
1372 struct psp_gfx_cmd_resp *cmd;
1373
1374 /*
1375 * TODO: bypass the unloading in sriov for now
1376 */
1377 if (amdgpu_sriov_vf(psp->adev))
1378 return 0;
1379
1380 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1381 if (!cmd)
1382 return -ENOMEM;
1383
1384 psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
1385
1386 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1387
1388 kfree(cmd);
1389
1390 return ret;
1391}
1392
1393int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1394{
1395 /*
1396 * TODO: bypass the loading in sriov for now
1397 */
1398 if (amdgpu_sriov_vf(psp->adev))
1399 return 0;
1400
1401 return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
1402}
1403
1404static int psp_dtm_terminate(struct psp_context *psp)
1405{
1406 int ret;
1407
1408 /*
1409 * TODO: bypass the terminate in sriov for now
1410 */
1411 if (amdgpu_sriov_vf(psp->adev))
1412 return 0;
1413
1414 if (!psp->dtm_context.dtm_initialized)
1415 return 0;
1416
1417 ret = psp_dtm_unload(psp);
1418 if (ret)
1419 return ret;
1420
1421 psp->dtm_context.dtm_initialized = false;
1422
1423 /* free hdcp shared memory */
1424 amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
1425 &psp->dtm_context.dtm_shared_mc_addr,
1426 &psp->dtm_context.dtm_shared_buf);
1427
1428 return 0;
1429}
1430// DTM end
1431
1432static int psp_hw_start(struct psp_context *psp)
1433{
1434 struct amdgpu_device *adev = psp->adev;
1435 int ret;
1436
1437 if (!amdgpu_sriov_vf(adev)) {
1438 if (psp->kdb_bin_size &&
1439 (psp->funcs->bootloader_load_kdb != NULL)) {
1440 ret = psp_bootloader_load_kdb(psp);
1441 if (ret) {
1442 DRM_ERROR("PSP load kdb failed!\n");
1443 return ret;
1444 }
1445 }
1446
1447 if (psp->spl_bin_size) {
1448 ret = psp_bootloader_load_spl(psp);
1449 if (ret) {
1450 DRM_ERROR("PSP load spl failed!\n");
1451 return ret;
1452 }
1453 }
1454
1455 ret = psp_bootloader_load_sysdrv(psp);
1456 if (ret) {
1457 DRM_ERROR("PSP load sysdrv failed!\n");
1458 return ret;
1459 }
1460
1461 ret = psp_bootloader_load_sos(psp);
1462 if (ret) {
1463 DRM_ERROR("PSP load sos failed!\n");
1464 return ret;
1465 }
1466 }
1467
1468 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
1469 if (ret) {
1470 DRM_ERROR("PSP create ring failed!\n");
1471 return ret;
1472 }
1473
1474 ret = psp_clear_vf_fw(psp);
1475 if (ret) {
1476 DRM_ERROR("PSP clear vf fw!\n");
1477 return ret;
1478 }
1479
1480 ret = psp_tmr_init(psp);
1481 if (ret) {
1482 DRM_ERROR("PSP tmr init failed!\n");
1483 return ret;
1484 }
1485
1486 /*
1487 * For ASICs with DF Cstate management centralized
1488 * to PMFW, TMR setup should be performed after PMFW
1489 * loaded and before other non-psp firmware loaded.
1490 */
1491 if (psp->pmfw_centralized_cstate_management) {
1492 ret = psp_load_smu_fw(psp);
1493 if (ret)
1494 return ret;
1495 }
1496
1497 ret = psp_tmr_load(psp);
1498 if (ret) {
1499 DRM_ERROR("PSP load tmr failed!\n");
1500 return ret;
1501 }
1502
1503 return 0;
1504}
1505
1506static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1507 enum psp_gfx_fw_type *type)
1508{
1509 switch (ucode->ucode_id) {
1510 case AMDGPU_UCODE_ID_SDMA0:
1511 *type = GFX_FW_TYPE_SDMA0;
1512 break;
1513 case AMDGPU_UCODE_ID_SDMA1:
1514 *type = GFX_FW_TYPE_SDMA1;
1515 break;
1516 case AMDGPU_UCODE_ID_SDMA2:
1517 *type = GFX_FW_TYPE_SDMA2;
1518 break;
1519 case AMDGPU_UCODE_ID_SDMA3:
1520 *type = GFX_FW_TYPE_SDMA3;
1521 break;
1522 case AMDGPU_UCODE_ID_SDMA4:
1523 *type = GFX_FW_TYPE_SDMA4;
1524 break;
1525 case AMDGPU_UCODE_ID_SDMA5:
1526 *type = GFX_FW_TYPE_SDMA5;
1527 break;
1528 case AMDGPU_UCODE_ID_SDMA6:
1529 *type = GFX_FW_TYPE_SDMA6;
1530 break;
1531 case AMDGPU_UCODE_ID_SDMA7:
1532 *type = GFX_FW_TYPE_SDMA7;
1533 break;
1534 case AMDGPU_UCODE_ID_CP_MES:
1535 *type = GFX_FW_TYPE_CP_MES;
1536 break;
1537 case AMDGPU_UCODE_ID_CP_MES_DATA:
1538 *type = GFX_FW_TYPE_MES_STACK;
1539 break;
1540 case AMDGPU_UCODE_ID_CP_CE:
1541 *type = GFX_FW_TYPE_CP_CE;
1542 break;
1543 case AMDGPU_UCODE_ID_CP_PFP:
1544 *type = GFX_FW_TYPE_CP_PFP;
1545 break;
1546 case AMDGPU_UCODE_ID_CP_ME:
1547 *type = GFX_FW_TYPE_CP_ME;
1548 break;
1549 case AMDGPU_UCODE_ID_CP_MEC1:
1550 *type = GFX_FW_TYPE_CP_MEC;
1551 break;
1552 case AMDGPU_UCODE_ID_CP_MEC1_JT:
1553 *type = GFX_FW_TYPE_CP_MEC_ME1;
1554 break;
1555 case AMDGPU_UCODE_ID_CP_MEC2:
1556 *type = GFX_FW_TYPE_CP_MEC;
1557 break;
1558 case AMDGPU_UCODE_ID_CP_MEC2_JT:
1559 *type = GFX_FW_TYPE_CP_MEC_ME2;
1560 break;
1561 case AMDGPU_UCODE_ID_RLC_G:
1562 *type = GFX_FW_TYPE_RLC_G;
1563 break;
1564 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
1565 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
1566 break;
1567 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
1568 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
1569 break;
1570 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1571 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1572 break;
1573 case AMDGPU_UCODE_ID_SMC:
1574 *type = GFX_FW_TYPE_SMU;
1575 break;
1576 case AMDGPU_UCODE_ID_UVD:
1577 *type = GFX_FW_TYPE_UVD;
1578 break;
1579 case AMDGPU_UCODE_ID_UVD1:
1580 *type = GFX_FW_TYPE_UVD1;
1581 break;
1582 case AMDGPU_UCODE_ID_VCE:
1583 *type = GFX_FW_TYPE_VCE;
1584 break;
1585 case AMDGPU_UCODE_ID_VCN:
1586 *type = GFX_FW_TYPE_VCN;
1587 break;
1588 case AMDGPU_UCODE_ID_VCN1:
1589 *type = GFX_FW_TYPE_VCN1;
1590 break;
1591 case AMDGPU_UCODE_ID_DMCU_ERAM:
1592 *type = GFX_FW_TYPE_DMCU_ERAM;
1593 break;
1594 case AMDGPU_UCODE_ID_DMCU_INTV:
1595 *type = GFX_FW_TYPE_DMCU_ISR;
1596 break;
1597 case AMDGPU_UCODE_ID_VCN0_RAM:
1598 *type = GFX_FW_TYPE_VCN0_RAM;
1599 break;
1600 case AMDGPU_UCODE_ID_VCN1_RAM:
1601 *type = GFX_FW_TYPE_VCN1_RAM;
1602 break;
1603 case AMDGPU_UCODE_ID_DMCUB:
1604 *type = GFX_FW_TYPE_DMUB;
1605 break;
1606 case AMDGPU_UCODE_ID_MAXIMUM:
1607 default:
1608 return -EINVAL;
1609 }
1610
1611 return 0;
1612}
1613
1614static void psp_print_fw_hdr(struct psp_context *psp,
1615 struct amdgpu_firmware_info *ucode)
1616{
1617 struct amdgpu_device *adev = psp->adev;
1618 struct common_firmware_header *hdr;
1619
1620 switch (ucode->ucode_id) {
1621 case AMDGPU_UCODE_ID_SDMA0:
1622 case AMDGPU_UCODE_ID_SDMA1:
1623 case AMDGPU_UCODE_ID_SDMA2:
1624 case AMDGPU_UCODE_ID_SDMA3:
1625 case AMDGPU_UCODE_ID_SDMA4:
1626 case AMDGPU_UCODE_ID_SDMA5:
1627 case AMDGPU_UCODE_ID_SDMA6:
1628 case AMDGPU_UCODE_ID_SDMA7:
1629 hdr = (struct common_firmware_header *)
1630 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
1631 amdgpu_ucode_print_sdma_hdr(hdr);
1632 break;
1633 case AMDGPU_UCODE_ID_CP_CE:
1634 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
1635 amdgpu_ucode_print_gfx_hdr(hdr);
1636 break;
1637 case AMDGPU_UCODE_ID_CP_PFP:
1638 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
1639 amdgpu_ucode_print_gfx_hdr(hdr);
1640 break;
1641 case AMDGPU_UCODE_ID_CP_ME:
1642 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
1643 amdgpu_ucode_print_gfx_hdr(hdr);
1644 break;
1645 case AMDGPU_UCODE_ID_CP_MEC1:
1646 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
1647 amdgpu_ucode_print_gfx_hdr(hdr);
1648 break;
1649 case AMDGPU_UCODE_ID_RLC_G:
1650 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
1651 amdgpu_ucode_print_rlc_hdr(hdr);
1652 break;
1653 case AMDGPU_UCODE_ID_SMC:
1654 hdr = (struct common_firmware_header *)adev->pm.fw->data;
1655 amdgpu_ucode_print_smc_hdr(hdr);
1656 break;
1657 default:
1658 break;
1659 }
1660}
1661
1662static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
1663 struct psp_gfx_cmd_resp *cmd)
1664{
1665 int ret;
1666 uint64_t fw_mem_mc_addr = ucode->mc_addr;
1667
1668 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
1669
1670 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1671 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
1672 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
1673 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
1674
1675 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
1676 if (ret)
1677 DRM_ERROR("Unknown firmware type\n");
1678
1679 return ret;
1680}
1681
1682static int psp_execute_np_fw_load(struct psp_context *psp,
1683 struct amdgpu_firmware_info *ucode)
1684{
1685 int ret = 0;
1686
1687 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
1688 if (ret)
1689 return ret;
1690
1691 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
1692 psp->fence_buf_mc_addr);
1693
1694 return ret;
1695}
1696
1697static int psp_load_smu_fw(struct psp_context *psp)
1698{
1699 int ret;
1700 struct amdgpu_device* adev = psp->adev;
1701 struct amdgpu_firmware_info *ucode =
1702 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
1703 struct amdgpu_ras *ras = psp->ras.ras;
1704
1705 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
1706 return 0;
1707
1708
1709 if (adev->in_gpu_reset && ras && ras->supported) {
1710 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
1711 if (ret) {
1712 DRM_WARN("Failed to set MP1 state prepare for reload\n");
1713 }
1714 }
1715
1716 ret = psp_execute_np_fw_load(psp, ucode);
1717
1718 if (ret)
1719 DRM_ERROR("PSP load smu failed!\n");
1720
1721 return ret;
1722}
1723
1724static bool fw_load_skip_check(struct psp_context *psp,
1725 struct amdgpu_firmware_info *ucode)
1726{
1727 if (!ucode->fw)
1728 return true;
1729
1730 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1731 (psp_smu_reload_quirk(psp) ||
1732 psp->autoload_supported ||
1733 psp->pmfw_centralized_cstate_management))
1734 return true;
1735
1736 if (amdgpu_sriov_vf(psp->adev) &&
1737 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
1738 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
1739 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
1740 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
1741 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
1742 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
1743 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
1744 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
1745 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
1746 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1747 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1748 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1749 || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
1750 /*skip ucode loading in SRIOV VF */
1751 return true;
1752
1753 if (psp->autoload_supported &&
1754 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
1755 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
1756 /* skip mec JT when autoload is enabled */
1757 return true;
1758
1759 return false;
1760}
1761
1762static int psp_np_fw_load(struct psp_context *psp)
1763{
1764 int i, ret;
1765 struct amdgpu_firmware_info *ucode;
1766 struct amdgpu_device* adev = psp->adev;
1767
1768 if (psp->autoload_supported &&
1769 !psp->pmfw_centralized_cstate_management) {
1770 ret = psp_load_smu_fw(psp);
1771 if (ret)
1772 return ret;
1773 }
1774
1775 for (i = 0; i < adev->firmware.max_ucodes; i++) {
1776 ucode = &adev->firmware.ucode[i];
1777
1778 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1779 !fw_load_skip_check(psp, ucode)) {
1780 ret = psp_load_smu_fw(psp);
1781 if (ret)
1782 return ret;
1783 continue;
1784 }
1785
1786 if (fw_load_skip_check(psp, ucode))
1787 continue;
1788
1789 if (psp->autoload_supported &&
1790 (adev->asic_type == CHIP_SIENNA_CICHLID ||
1791 adev->asic_type == CHIP_NAVY_FLOUNDER) &&
1792 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
1793 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
1794 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
1795 /* PSP only receive one SDMA fw for sienna_cichlid,
1796 * as all four sdma fw are same */
1797 continue;
1798
1799 psp_print_fw_hdr(psp, ucode);
1800
1801 ret = psp_execute_np_fw_load(psp, ucode);
1802 if (ret)
1803 return ret;
1804
1805 /* Start rlc autoload after psp recieved all the gfx firmware */
1806 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
1807 AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
1808 ret = psp_rlc_autoload_start(psp);
1809 if (ret) {
1810 DRM_ERROR("Failed to start rlc autoload\n");
1811 return ret;
1812 }
1813 }
1814 }
1815
1816 return 0;
1817}
1818
1819static int psp_load_fw(struct amdgpu_device *adev)
1820{
1821 int ret;
1822 struct psp_context *psp = &adev->psp;
1823
1824 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
1825 psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
1826 goto skip_memalloc;
1827 }
1828
1829 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1830 if (!psp->cmd)
1831 return -ENOMEM;
1832
1833 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
1834 AMDGPU_GEM_DOMAIN_GTT,
1835 &psp->fw_pri_bo,
1836 &psp->fw_pri_mc_addr,
1837 &psp->fw_pri_buf);
1838 if (ret)
1839 goto failed;
1840
1841 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
1842 AMDGPU_GEM_DOMAIN_VRAM,
1843 &psp->fence_buf_bo,
1844 &psp->fence_buf_mc_addr,
1845 &psp->fence_buf);
1846 if (ret)
1847 goto failed;
1848
1849 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
1850 AMDGPU_GEM_DOMAIN_VRAM,
1851 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1852 (void **)&psp->cmd_buf_mem);
1853 if (ret)
1854 goto failed;
1855
1856 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
1857
1858 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
1859 if (ret) {
1860 DRM_ERROR("PSP ring init failed!\n");
1861 goto failed;
1862 }
1863
1864skip_memalloc:
1865 ret = psp_hw_start(psp);
1866 if (ret)
1867 goto failed;
1868
1869 ret = psp_np_fw_load(psp);
1870 if (ret)
1871 goto failed;
1872
1873 ret = psp_asd_load(psp);
1874 if (ret) {
1875 DRM_ERROR("PSP load asd failed!\n");
1876 return ret;
1877 }
1878
1879 if (psp->adev->psp.ta_fw) {
1880 ret = psp_ras_initialize(psp);
1881 if (ret)
1882 dev_err(psp->adev->dev,
1883 "RAS: Failed to initialize RAS\n");
1884
1885 ret = psp_hdcp_initialize(psp);
1886 if (ret)
1887 dev_err(psp->adev->dev,
1888 "HDCP: Failed to initialize HDCP\n");
1889
1890 ret = psp_dtm_initialize(psp);
1891 if (ret)
1892 dev_err(psp->adev->dev,
1893 "DTM: Failed to initialize DTM\n");
1894 }
1895
1896 return 0;
1897
1898failed:
1899 /*
1900 * all cleanup jobs (xgmi terminate, ras terminate,
1901 * ring destroy, cmd/fence/fw buffers destory,
1902 * psp->cmd destory) are delayed to psp_hw_fini
1903 */
1904 return ret;
1905}
1906
1907static int psp_hw_init(void *handle)
1908{
1909 int ret;
1910 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1911
1912 mutex_lock(&adev->firmware.mutex);
1913 /*
1914 * This sequence is just used on hw_init only once, no need on
1915 * resume.
1916 */
1917 ret = amdgpu_ucode_init_bo(adev);
1918 if (ret)
1919 goto failed;
1920
1921 ret = psp_load_fw(adev);
1922 if (ret) {
1923 DRM_ERROR("PSP firmware loading failed\n");
1924 goto failed;
1925 }
1926
1927 mutex_unlock(&adev->firmware.mutex);
1928 return 0;
1929
1930failed:
1931 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1932 mutex_unlock(&adev->firmware.mutex);
1933 return -EINVAL;
1934}
1935
1936static int psp_hw_fini(void *handle)
1937{
1938 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1939 struct psp_context *psp = &adev->psp;
1940 int ret;
1941
1942 if (psp->adev->psp.ta_fw) {
1943 psp_ras_terminate(psp);
1944 psp_dtm_terminate(psp);
1945 psp_hdcp_terminate(psp);
1946 }
1947
1948 psp_asd_unload(psp);
1949 ret = psp_clear_vf_fw(psp);
1950 if (ret) {
1951 DRM_ERROR("PSP clear vf fw!\n");
1952 return ret;
1953 }
1954
1955 psp_tmr_terminate(psp);
1956 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
1957
1958 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
1959 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
1960 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
1961 &psp->fence_buf_mc_addr, &psp->fence_buf);
1962 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1963 (void **)&psp->cmd_buf_mem);
1964
1965 kfree(psp->cmd);
1966 psp->cmd = NULL;
1967
1968 return 0;
1969}
1970
1971static int psp_suspend(void *handle)
1972{
1973 int ret;
1974 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1975 struct psp_context *psp = &adev->psp;
1976
1977 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1978 psp->xgmi_context.initialized == 1) {
1979 ret = psp_xgmi_terminate(psp);
1980 if (ret) {
1981 DRM_ERROR("Failed to terminate xgmi ta\n");
1982 return ret;
1983 }
1984 }
1985
1986 if (psp->adev->psp.ta_fw) {
1987 ret = psp_ras_terminate(psp);
1988 if (ret) {
1989 DRM_ERROR("Failed to terminate ras ta\n");
1990 return ret;
1991 }
1992 ret = psp_hdcp_terminate(psp);
1993 if (ret) {
1994 DRM_ERROR("Failed to terminate hdcp ta\n");
1995 return ret;
1996 }
1997 ret = psp_dtm_terminate(psp);
1998 if (ret) {
1999 DRM_ERROR("Failed to terminate dtm ta\n");
2000 return ret;
2001 }
2002 }
2003
2004 ret = psp_asd_unload(psp);
2005 if (ret) {
2006 DRM_ERROR("Failed to unload asd\n");
2007 return ret;
2008 }
2009
2010 ret = psp_tmr_terminate(psp);
2011 if (ret) {
2012 DRM_ERROR("Failed to terminate tmr\n");
2013 return ret;
2014 }
2015
2016 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2017 if (ret) {
2018 DRM_ERROR("PSP ring stop failed\n");
2019 return ret;
2020 }
2021
2022 return 0;
2023}
2024
2025static int psp_resume(void *handle)
2026{
2027 int ret;
2028 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2029 struct psp_context *psp = &adev->psp;
2030
2031 DRM_INFO("PSP is resuming...\n");
2032
2033 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2034 if (ret) {
2035 DRM_ERROR("Failed to process memory training!\n");
2036 return ret;
2037 }
2038
2039 mutex_lock(&adev->firmware.mutex);
2040
2041 ret = psp_hw_start(psp);
2042 if (ret)
2043 goto failed;
2044
2045 ret = psp_np_fw_load(psp);
2046 if (ret)
2047 goto failed;
2048
2049 ret = psp_asd_load(psp);
2050 if (ret) {
2051 DRM_ERROR("PSP load asd failed!\n");
2052 goto failed;
2053 }
2054
2055 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2056 ret = psp_xgmi_initialize(psp);
2057 /* Warning the XGMI seesion initialize failure
2058 * Instead of stop driver initialization
2059 */
2060 if (ret)
2061 dev_err(psp->adev->dev,
2062 "XGMI: Failed to initialize XGMI session\n");
2063 }
2064
2065 if (psp->adev->psp.ta_fw) {
2066 ret = psp_ras_initialize(psp);
2067 if (ret)
2068 dev_err(psp->adev->dev,
2069 "RAS: Failed to initialize RAS\n");
2070
2071 ret = psp_hdcp_initialize(psp);
2072 if (ret)
2073 dev_err(psp->adev->dev,
2074 "HDCP: Failed to initialize HDCP\n");
2075
2076 ret = psp_dtm_initialize(psp);
2077 if (ret)
2078 dev_err(psp->adev->dev,
2079 "DTM: Failed to initialize DTM\n");
2080 }
2081
2082 mutex_unlock(&adev->firmware.mutex);
2083
2084 return 0;
2085
2086failed:
2087 DRM_ERROR("PSP resume failed\n");
2088 mutex_unlock(&adev->firmware.mutex);
2089 return ret;
2090}
2091
2092int psp_gpu_reset(struct amdgpu_device *adev)
2093{
2094 int ret;
2095
2096 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2097 return 0;
2098
2099 mutex_lock(&adev->psp.mutex);
2100 ret = psp_mode1_reset(&adev->psp);
2101 mutex_unlock(&adev->psp.mutex);
2102
2103 return ret;
2104}
2105
2106int psp_rlc_autoload_start(struct psp_context *psp)
2107{
2108 int ret;
2109 struct psp_gfx_cmd_resp *cmd;
2110
2111 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
2112 if (!cmd)
2113 return -ENOMEM;
2114
2115 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2116
2117 ret = psp_cmd_submit_buf(psp, NULL, cmd,
2118 psp->fence_buf_mc_addr);
2119 kfree(cmd);
2120 return ret;
2121}
2122
2123int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
2124 uint64_t cmd_gpu_addr, int cmd_size)
2125{
2126 struct amdgpu_firmware_info ucode = {0};
2127
2128 ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
2129 AMDGPU_UCODE_ID_VCN0_RAM;
2130 ucode.mc_addr = cmd_gpu_addr;
2131 ucode.ucode_size = cmd_size;
2132
2133 return psp_execute_np_fw_load(&adev->psp, &ucode);
2134}
2135
2136int psp_ring_cmd_submit(struct psp_context *psp,
2137 uint64_t cmd_buf_mc_addr,
2138 uint64_t fence_mc_addr,
2139 int index)
2140{
2141 unsigned int psp_write_ptr_reg = 0;
2142 struct psp_gfx_rb_frame *write_frame;
2143 struct psp_ring *ring = &psp->km_ring;
2144 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2145 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2146 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2147 struct amdgpu_device *adev = psp->adev;
2148 uint32_t ring_size_dw = ring->ring_size / 4;
2149 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2150
2151 /* KM (GPCOM) prepare write pointer */
2152 psp_write_ptr_reg = psp_ring_get_wptr(psp);
2153
2154 /* Update KM RB frame pointer to new frame */
2155 /* write_frame ptr increments by size of rb_frame in bytes */
2156 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
2157 if ((psp_write_ptr_reg % ring_size_dw) == 0)
2158 write_frame = ring_buffer_start;
2159 else
2160 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2161 /* Check invalid write_frame ptr address */
2162 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2163 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2164 ring_buffer_start, ring_buffer_end, write_frame);
2165 DRM_ERROR("write_frame is pointing to address out of bounds\n");
2166 return -EINVAL;
2167 }
2168
2169 /* Initialize KM RB frame */
2170 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2171
2172 /* Update KM RB frame */
2173 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2174 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2175 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2176 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2177 write_frame->fence_value = index;
2178 amdgpu_asic_flush_hdp(adev, NULL);
2179
2180 /* Update the write Pointer in DWORDs */
2181 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2182 psp_ring_set_wptr(psp, psp_write_ptr_reg);
2183 return 0;
2184}
2185
2186int psp_init_asd_microcode(struct psp_context *psp,
2187 const char *chip_name)
2188{
2189 struct amdgpu_device *adev = psp->adev;
2190 char fw_name[30];
2191 const struct psp_firmware_header_v1_0 *asd_hdr;
2192 int err = 0;
2193
2194 if (!chip_name) {
2195 dev_err(adev->dev, "invalid chip name for asd microcode\n");
2196 return -EINVAL;
2197 }
2198
2199 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
2200 err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
2201 if (err)
2202 goto out;
2203
2204 err = amdgpu_ucode_validate(adev->psp.asd_fw);
2205 if (err)
2206 goto out;
2207
2208 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
2209 adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
2210 adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
2211 adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
2212 adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
2213 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
2214 return 0;
2215out:
2216 dev_err(adev->dev, "fail to initialize asd microcode\n");
2217 release_firmware(adev->psp.asd_fw);
2218 adev->psp.asd_fw = NULL;
2219 return err;
2220}
2221
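/*
 * Layout of the amdgpu/<chip>_sos.bin image as parsed below: a v1.0 header
 * describes the SYS_DRV and SOS binaries, with component offsets taken
 * relative to the start of the ucode array (sys_start_addr).  Minor
 * version 1 additionally carries a TOC and a KDB, minor version 2 carries
 * a KDB only, and minor version 3 carries TOC, KDB and SPL.
 */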
2222int psp_init_sos_microcode(struct psp_context *psp,
2223 const char *chip_name)
2224{
2225 struct amdgpu_device *adev = psp->adev;
2226 char fw_name[30];
2227 const struct psp_firmware_header_v1_0 *sos_hdr;
2228 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
2229 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
2230 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
2231 int err = 0;
2232
2233 if (!chip_name) {
2234 dev_err(adev->dev, "invalid chip name for sos microcode\n");
2235 return -EINVAL;
2236 }
2237
2238 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
2239 err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
2240 if (err)
2241 goto out;
2242
2243 err = amdgpu_ucode_validate(adev->psp.sos_fw);
2244 if (err)
2245 goto out;
2246
2247 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
2248 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
2249
2250 switch (sos_hdr->header.header_version_major) {
2251 case 1:
2252 adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
2253 adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
2254 adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
2255 adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
2256 adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
2257 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
2258 adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2259 le32_to_cpu(sos_hdr->sos_offset_bytes);
2260 if (sos_hdr->header.header_version_minor == 1) {
2261 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
2262 adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
2263 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2264 le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
2265 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
2266 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2267 le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
2268 }
2269 if (sos_hdr->header.header_version_minor == 2) {
2270 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
2271 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
2272 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2273 le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
2274 }
2275 if (sos_hdr->header.header_version_minor == 3) {
2276 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
2277 adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
2278 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2279 le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
2280 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
2281 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2282 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
2283 adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
2284 adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2285 le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
2286 }
2287 break;
2288 default:
2289 dev_err(adev->dev,
2290 "unsupported psp sos firmware\n");
2291 err = -EINVAL;
2292 goto out;
2293 }
2294
2295 return 0;
2296out:
2297 dev_err(adev->dev,
2298 "failed to init sos firmware\n");
2299 release_firmware(adev->psp.sos_fw);
2300 adev->psp.sos_fw = NULL;
2301
2302 return err;
2303}
2304
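/*
 * A v2.0 TA firmware image packs several TAs into one file: the header is
 * followed by an array of ta_fw_bin_desc entries, each giving the type,
 * version, size and offset (relative to the ucode array) of one TA binary.
 * This helper copies those fields into the matching psp_context slot
 * (ASD, XGMI, RAS, HDCP or DTM).
 */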
2305int parse_ta_bin_descriptor(struct psp_context *psp,
2306 const struct ta_fw_bin_desc *desc,
2307 const struct ta_firmware_header_v2_0 *ta_hdr)
2308{
2309 uint8_t *ucode_start_addr = NULL;
2310
2311 if (!psp || !desc || !ta_hdr)
2312 return -EINVAL;
2313
2314 ucode_start_addr = (uint8_t *)ta_hdr +
2315 le32_to_cpu(desc->offset_bytes) +
2316 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
2317
2318 switch (desc->fw_type) {
2319 case TA_FW_TYPE_PSP_ASD:
2320 psp->asd_fw_version = le32_to_cpu(desc->fw_version);
2321 psp->asd_feature_version = le32_to_cpu(desc->fw_version);
2322 psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
2323 psp->asd_start_addr = ucode_start_addr;
2324 break;
2325 case TA_FW_TYPE_PSP_XGMI:
2326 psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
2327 psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes);
2328 psp->ta_xgmi_start_addr = ucode_start_addr;
2329 break;
2330 case TA_FW_TYPE_PSP_RAS:
2331 psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version);
2332 psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes);
2333 psp->ta_ras_start_addr = ucode_start_addr;
2334 break;
2335 case TA_FW_TYPE_PSP_HDCP:
2336 psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
2337 psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes);
2338 psp->ta_hdcp_start_addr = ucode_start_addr;
2339 break;
2340 case TA_FW_TYPE_PSP_DTM:
2341 psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version);
2342 psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
2343 psp->ta_dtm_start_addr = ucode_start_addr;
2344 break;
2345 default:
2346 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
2347 break;
2348 }
2349
2350 return 0;
2351}
2352
2353int psp_init_ta_microcode(struct psp_context *psp,
2354 const char *chip_name)
2355{
2356 struct amdgpu_device *adev = psp->adev;
2357 char fw_name[30];
2358 const struct ta_firmware_header_v2_0 *ta_hdr;
2359 int err = 0;
2360 int ta_index = 0;
2361
2362 if (!chip_name) {
2363 dev_err(adev->dev, "invalid chip name for ta microcode\n");
2364 return -EINVAL;
2365 }
2366
2367 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
2368 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
2369 if (err)
2370 goto out;
2371
2372 err = amdgpu_ucode_validate(adev->psp.ta_fw);
2373 if (err)
2374 goto out;
2375
2376 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
2377
2378 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
2379 dev_err(adev->dev, "unsupported TA header version\n");
2380 err = -EINVAL;
2381 goto out;
2382 }
2383
2384 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
2385 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
2386 err = -EINVAL;
2387 goto out;
2388 }
2389
2390 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
2391 err = parse_ta_bin_descriptor(psp,
2392 &ta_hdr->ta_fw_bin[ta_index],
2393 ta_hdr);
2394 if (err)
2395 goto out;
2396 }
2397
2398 return 0;
2399out:
2400 dev_err(adev->dev, "fail to initialize ta microcode\n");
2401 release_firmware(adev->psp.ta_fw);
2402 adev->psp.ta_fw = NULL;
2403 return err;
2404}
2405
2406static int psp_set_clockgating_state(void *handle,
2407 enum amd_clockgating_state state)
2408{
2409 return 0;
2410}
2411
2412static int psp_set_powergating_state(void *handle,
2413 enum amd_powergating_state state)
2414{
2415 return 0;
2416}
2417
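/*
 * sysfs interface for the USB-C PD firmware: reading the usbc_pd_fw
 * attribute returns the current PD firmware version, writing a firmware
 * file name triggers a PD firmware update through the PSP.
 *
 * Illustrative usage from user space (the exact sysfs path depends on the
 * device; the file must live under the firmware search path in amdgpu/):
 *	cat .../usbc_pd_fw
 *	echo <pd_fw_file_name> > .../usbc_pd_fw
 */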
2418static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
2419 struct device_attribute *attr,
2420 char *buf)
2421{
2422 struct drm_device *ddev = dev_get_drvdata(dev);
2423 struct amdgpu_device *adev = ddev->dev_private;
2424 uint32_t fw_ver;
2425 int ret;
2426
2427 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2428 DRM_INFO("PSP block is not ready yet.");
2429 return -EBUSY;
2430 }
2431
2432 mutex_lock(&adev->psp.mutex);
2433 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
2434 mutex_unlock(&adev->psp.mutex);
2435
2436 if (ret) {
2437 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
2438 return ret;
2439 }
2440
2441 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
2442}
2443
2444static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
2445 struct device_attribute *attr,
2446 const char *buf,
2447 size_t count)
2448{
2449 struct drm_device *ddev = dev_get_drvdata(dev);
2450 struct amdgpu_device *adev = ddev->dev_private;
2451 void *cpu_addr;
2452 dma_addr_t dma_addr;
2453 int ret;
2454 char fw_name[100];
2455 const struct firmware *usbc_pd_fw;
2456
2457 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2458 DRM_INFO("PSP block is not ready yet.");
2459 return -EBUSY;
2460 }
2461
2462 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
2463 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
2464 if (ret)
2465 goto fail;
2466
2467 /* We need contiguous physical mem to place the FW for psp to access */
2468 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
2469
2470 ret = dma_mapping_error(adev->dev, dma_addr);
2471 if (ret)
2472 goto rel_buf;
2473
2474 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
2475
2476 /*
2477 * x86 specific workaround.
2478	 * Without it the buffer is invisible to the PSP.
2479 *
2480 * TODO Remove once PSP starts snooping CPU cache
2481 */
2482#ifdef CONFIG_X86
2483 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
2484#endif
2485
2486 mutex_lock(&adev->psp.mutex);
2487 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
2488 mutex_unlock(&adev->psp.mutex);
2489
2490rel_buf:
2491 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
2492 release_firmware(usbc_pd_fw);
2493
2494fail:
2495 if (ret) {
2496 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
2497 return ret;
2498 }
2499
2500 return count;
2501}
2502
2503static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
2504 psp_usbc_pd_fw_sysfs_read,
2505 psp_usbc_pd_fw_sysfs_write);
2506
2509const struct amd_ip_funcs psp_ip_funcs = {
2510 .name = "psp",
2511 .early_init = psp_early_init,
2512 .late_init = NULL,
2513 .sw_init = psp_sw_init,
2514 .sw_fini = psp_sw_fini,
2515 .hw_init = psp_hw_init,
2516 .hw_fini = psp_hw_fini,
2517 .suspend = psp_suspend,
2518 .resume = psp_resume,
2519 .is_idle = NULL,
2520 .check_soft_reset = NULL,
2521 .wait_for_idle = NULL,
2522 .soft_reset = NULL,
2523 .set_clockgating_state = psp_set_clockgating_state,
2524 .set_powergating_state = psp_set_powergating_state,
2525};
2526
2527static int psp_sysfs_init(struct amdgpu_device *adev)
2528{
2529 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
2530
2531 if (ret)
2532 DRM_ERROR("Failed to create USBC PD FW control file!");
2533
2534 return ret;
2535}
2536
2537static void psp_sysfs_fini(struct amdgpu_device *adev)
2538{
2539 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
2540}
2541
2542const struct amdgpu_ip_block_version psp_v3_1_ip_block =
2543{
2544 .type = AMD_IP_BLOCK_TYPE_PSP,
2545 .major = 3,
2546 .minor = 1,
2547 .rev = 0,
2548 .funcs = &psp_ip_funcs,
2549};
2550
2551const struct amdgpu_ip_block_version psp_v10_0_ip_block =
2552{
2553 .type = AMD_IP_BLOCK_TYPE_PSP,
2554 .major = 10,
2555 .minor = 0,
2556 .rev = 0,
2557 .funcs = &psp_ip_funcs,
2558};
2559
2560const struct amdgpu_ip_block_version psp_v11_0_ip_block =
2561{
2562 .type = AMD_IP_BLOCK_TYPE_PSP,
2563 .major = 11,
2564 .minor = 0,
2565 .rev = 0,
2566 .funcs = &psp_ip_funcs,
2567};
2568
2569const struct amdgpu_ip_block_version psp_v12_0_ip_block =
2570{
2571 .type = AMD_IP_BLOCK_TYPE_PSP,
2572 .major = 12,
2573 .minor = 0,
2574 .rev = 0,
2575 .funcs = &psp_ip_funcs,
2576};
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26#include <linux/firmware.h>
27#include <drm/drm_drv.h>
28
29#include "amdgpu.h"
30#include "amdgpu_psp.h"
31#include "amdgpu_ucode.h"
32#include "amdgpu_xgmi.h"
33#include "soc15_common.h"
34#include "psp_v3_1.h"
35#include "psp_v10_0.h"
36#include "psp_v11_0.h"
37#include "psp_v11_0_8.h"
38#include "psp_v12_0.h"
39#include "psp_v13_0.h"
40#include "psp_v13_0_4.h"
41
42#include "amdgpu_ras.h"
43#include "amdgpu_securedisplay.h"
44#include "amdgpu_atomfirmware.h"
45
46#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
47
48static int psp_load_smu_fw(struct psp_context *psp);
49static int psp_rap_terminate(struct psp_context *psp);
50static int psp_securedisplay_terminate(struct psp_context *psp);
51
52static int psp_ring_init(struct psp_context *psp,
53 enum psp_ring_type ring_type)
54{
55 int ret = 0;
56 struct psp_ring *ring;
57 struct amdgpu_device *adev = psp->adev;
58
59 ring = &psp->km_ring;
60
61 ring->ring_type = ring_type;
62
63	/* allocate a 4K page of local frame buffer memory for the ring */
64 ring->ring_size = 0x1000;
65 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
66 AMDGPU_GEM_DOMAIN_VRAM |
67 AMDGPU_GEM_DOMAIN_GTT,
68 &adev->firmware.rbuf,
69 &ring->ring_mem_mc_addr,
70 (void **)&ring->ring_mem);
71 if (ret) {
72 ring->ring_size = 0;
73 return ret;
74 }
75
76 return 0;
77}
78
79/*
80 * Due to DF Cstate management centralized to PMFW, the firmware
81 * loading sequence will be updated as below:
82 * - Load KDB
83 * - Load SYS_DRV
84 * - Load tOS
85 * - Load PMFW
86 * - Setup TMR
87 * - Load other non-psp fw
88 * - Load ASD
89 * - Load XGMI/RAS/HDCP/DTM TA if any
90 *
91 * This new sequence is required for
92 * - Arcturus and onwards
93 */
94static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
95{
96 struct amdgpu_device *adev = psp->adev;
97
98 if (amdgpu_sriov_vf(adev)) {
99 psp->pmfw_centralized_cstate_management = false;
100 return;
101 }
102
103 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
104 case IP_VERSION(11, 0, 0):
105 case IP_VERSION(11, 0, 4):
106 case IP_VERSION(11, 0, 5):
107 case IP_VERSION(11, 0, 7):
108 case IP_VERSION(11, 0, 9):
109 case IP_VERSION(11, 0, 11):
110 case IP_VERSION(11, 0, 12):
111 case IP_VERSION(11, 0, 13):
112 case IP_VERSION(13, 0, 0):
113 case IP_VERSION(13, 0, 2):
114 case IP_VERSION(13, 0, 7):
115 psp->pmfw_centralized_cstate_management = true;
116 break;
117 default:
118 psp->pmfw_centralized_cstate_management = false;
119 break;
120 }
121}
122
123static int psp_init_sriov_microcode(struct psp_context *psp)
124{
125 struct amdgpu_device *adev = psp->adev;
126 char ucode_prefix[30];
127 int ret = 0;
128
129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
130
131 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
132 case IP_VERSION(9, 0, 0):
133 case IP_VERSION(11, 0, 7):
134 case IP_VERSION(11, 0, 9):
135 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
136 ret = psp_init_cap_microcode(psp, ucode_prefix);
137 break;
138 case IP_VERSION(13, 0, 2):
139 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
140 ret = psp_init_cap_microcode(psp, ucode_prefix);
141 ret &= psp_init_ta_microcode(psp, ucode_prefix);
142 break;
143 case IP_VERSION(13, 0, 0):
144 adev->virt.autoload_ucode_id = 0;
145 break;
146 case IP_VERSION(13, 0, 6):
147 ret = psp_init_cap_microcode(psp, ucode_prefix);
148 ret &= psp_init_ta_microcode(psp, ucode_prefix);
149 break;
150 case IP_VERSION(13, 0, 10):
151 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
152 ret = psp_init_cap_microcode(psp, ucode_prefix);
153 break;
154 default:
155 return -EINVAL;
156 }
157 return ret;
158}
159
160static int psp_early_init(void *handle)
161{
162 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
163 struct psp_context *psp = &adev->psp;
164
165 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
166 case IP_VERSION(9, 0, 0):
167 psp_v3_1_set_psp_funcs(psp);
168 psp->autoload_supported = false;
169 break;
170 case IP_VERSION(10, 0, 0):
171 case IP_VERSION(10, 0, 1):
172 psp_v10_0_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 break;
175 case IP_VERSION(11, 0, 2):
176 case IP_VERSION(11, 0, 4):
177 psp_v11_0_set_psp_funcs(psp);
178 psp->autoload_supported = false;
179 break;
180 case IP_VERSION(11, 0, 0):
181 case IP_VERSION(11, 0, 7):
182 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
183 fallthrough;
184 case IP_VERSION(11, 0, 5):
185 case IP_VERSION(11, 0, 9):
186 case IP_VERSION(11, 0, 11):
187 case IP_VERSION(11, 5, 0):
188 case IP_VERSION(11, 0, 12):
189 case IP_VERSION(11, 0, 13):
190 psp_v11_0_set_psp_funcs(psp);
191 psp->autoload_supported = true;
192 break;
193 case IP_VERSION(11, 0, 3):
194 case IP_VERSION(12, 0, 1):
195 psp_v12_0_set_psp_funcs(psp);
196 break;
197 case IP_VERSION(13, 0, 2):
198 case IP_VERSION(13, 0, 6):
199 psp_v13_0_set_psp_funcs(psp);
200 break;
201 case IP_VERSION(13, 0, 1):
202 case IP_VERSION(13, 0, 3):
203 case IP_VERSION(13, 0, 5):
204 case IP_VERSION(13, 0, 8):
205 case IP_VERSION(13, 0, 11):
206 case IP_VERSION(14, 0, 0):
207 psp_v13_0_set_psp_funcs(psp);
208 psp->autoload_supported = true;
209 break;
210 case IP_VERSION(11, 0, 8):
211 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
212 psp_v11_0_8_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 }
215 break;
216 case IP_VERSION(13, 0, 0):
217 case IP_VERSION(13, 0, 7):
218 case IP_VERSION(13, 0, 10):
219 psp_v13_0_set_psp_funcs(psp);
220 psp->autoload_supported = true;
221 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
222 break;
223 case IP_VERSION(13, 0, 4):
224 psp_v13_0_4_set_psp_funcs(psp);
225 psp->autoload_supported = true;
226 break;
227 default:
228 return -EINVAL;
229 }
230
231 psp->adev = adev;
232
233 psp_check_pmfw_centralized_cstate_management(psp);
234
235 if (amdgpu_sriov_vf(adev))
236 return psp_init_sriov_microcode(psp);
237 else
238 return psp_init_microcode(psp);
239}
240
241void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
242{
243 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
244 &mem_ctx->shared_buf);
245 mem_ctx->shared_bo = NULL;
246}
247
248static void psp_free_shared_bufs(struct psp_context *psp)
249{
250 void *tmr_buf;
251 void **pptr;
252
253 /* free TMR memory buffer */
254 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
255 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
256 psp->tmr_bo = NULL;
257
258 /* free xgmi shared memory */
259 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
260
261 /* free ras shared memory */
262 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
263
264 /* free hdcp shared memory */
265 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
266
267 /* free dtm shared memory */
268 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
269
270 /* free rap shared memory */
271 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
272
273 /* free securedisplay shared memory */
274 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
275
276
277}
278
279static void psp_memory_training_fini(struct psp_context *psp)
280{
281 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
282
283 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
284 kfree(ctx->sys_cache);
285 ctx->sys_cache = NULL;
286}
287
288static int psp_memory_training_init(struct psp_context *psp)
289{
290 int ret;
291 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
292
293 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
294 DRM_DEBUG("memory training is not supported!\n");
295 return 0;
296 }
297
298 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
299 if (ctx->sys_cache == NULL) {
300 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
301 ret = -ENOMEM;
302 goto Err_out;
303 }
304
305 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
306 ctx->train_data_size,
307 ctx->p2c_train_data_offset,
308 ctx->c2p_train_data_offset);
309 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
310 return 0;
311
312Err_out:
313 psp_memory_training_fini(psp);
314 return ret;
315}
316
317/*
318 * Helper function to query a psp runtime database entry
319 *
320 * @adev: amdgpu_device pointer
321 * @entry_type: the type of psp runtime database entry
322 * @db_entry: runtime database entry pointer
323 *
324 * Return false if the runtime database doesn't exist or the entry is invalid,
325 * or true if the specific entry is found and copied to @db_entry
326 */
327static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
328 enum psp_runtime_entry_type entry_type,
329 void *db_entry)
330{
331 uint64_t db_header_pos, db_dir_pos;
332 struct psp_runtime_data_header db_header = {0};
333 struct psp_runtime_data_directory db_dir = {0};
334 bool ret = false;
335 int i;
336
337 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
338 return false;
339
340 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
341 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
342
343 /* read runtime db header from vram */
344 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
345 sizeof(struct psp_runtime_data_header), false);
346
347 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
348 /* runtime db doesn't exist, exit */
349 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
350 return false;
351 }
352
353 /* read runtime database entry from vram */
354 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
355 sizeof(struct psp_runtime_data_directory), false);
356
357 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
358 /* invalid db entry count, exit */
359 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
360 return false;
361 }
362
363 /* look up for requested entry type */
364 for (i = 0; i < db_dir.entry_count && !ret; i++) {
365 if (db_dir.entry_list[i].entry_type == entry_type) {
366 switch (entry_type) {
367 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
368 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
369 /* invalid db entry size */
370 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
371 return false;
372 }
373 /* read runtime database entry */
374 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
375 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
376 ret = true;
377 break;
378 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
379 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
380 /* invalid db entry size */
381 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
382 return false;
383 }
384 /* read runtime database entry */
385 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
386 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
387 ret = true;
388 break;
389 default:
390 ret = false;
391 break;
392 }
393 }
394 }
395
396 return ret;
397}
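/*
 * Typical use (see psp_sw_init() below): zero the entry structure for the
 * wanted type and only consume it when the helper returns true, e.g.
 *
 *	struct psp_runtime_boot_cfg_entry boot_cfg_entry = {0};
 *
 *	if (psp_get_runtime_db_entry(adev, PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
 *				     &boot_cfg_entry))
 *		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
 */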
398
399static int psp_sw_init(void *handle)
400{
401 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
402 struct psp_context *psp = &adev->psp;
403 int ret;
404 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
405 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
406 struct psp_runtime_scpm_entry scpm_entry;
407
408 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
409 if (!psp->cmd) {
410 DRM_ERROR("Failed to allocate memory to command buffer!\n");
411		return -ENOMEM;
412 }
413
414 adev->psp.xgmi_context.supports_extended_data =
415 !adev->gmc.xgmi.connected_to_cpu &&
416 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
417
418 memset(&scpm_entry, 0, sizeof(scpm_entry));
419 if ((psp_get_runtime_db_entry(adev,
420 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
421 &scpm_entry)) &&
422 (scpm_entry.scpm_status != SCPM_DISABLE)) {
423 adev->scpm_enabled = true;
424 adev->scpm_status = scpm_entry.scpm_status;
425 } else {
426 adev->scpm_enabled = false;
427 adev->scpm_status = SCPM_DISABLE;
428 }
429
430 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
431
432 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
433 if (psp_get_runtime_db_entry(adev,
434 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
435 &boot_cfg_entry)) {
436 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
437 if ((psp->boot_cfg_bitmask) &
438 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
439 /* If psp runtime database exists, then
440 * only enable two stage memory training
441 * when TWO_STAGE_DRAM_TRAINING bit is set
442 * in runtime database
443 */
444 mem_training_ctx->enable_mem_training = true;
445 }
446
447 } else {
448 /* If psp runtime database doesn't exist or is
449 * invalid, force enable two stage memory training
450 */
451 mem_training_ctx->enable_mem_training = true;
452 }
453
454 if (mem_training_ctx->enable_mem_training) {
455 ret = psp_memory_training_init(psp);
456 if (ret) {
457 DRM_ERROR("Failed to initialize memory training!\n");
458 return ret;
459 }
460
461 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
462 if (ret) {
463 DRM_ERROR("Failed to process memory training!\n");
464 return ret;
465 }
466 }
467
468 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
469 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
470 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
471 &psp->fw_pri_bo,
472 &psp->fw_pri_mc_addr,
473 &psp->fw_pri_buf);
474 if (ret)
475 return ret;
476
477 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
478 AMDGPU_GEM_DOMAIN_VRAM |
479 AMDGPU_GEM_DOMAIN_GTT,
480 &psp->fence_buf_bo,
481 &psp->fence_buf_mc_addr,
482 &psp->fence_buf);
483 if (ret)
484 goto failed1;
485
486 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
487 AMDGPU_GEM_DOMAIN_VRAM |
488 AMDGPU_GEM_DOMAIN_GTT,
489 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
490 (void **)&psp->cmd_buf_mem);
491 if (ret)
492 goto failed2;
493
494 return 0;
495
496failed2:
497 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
498 &psp->fence_buf_mc_addr, &psp->fence_buf);
499failed1:
500 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
501 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
502 return ret;
503}
504
505static int psp_sw_fini(void *handle)
506{
507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
508 struct psp_context *psp = &adev->psp;
509 struct psp_gfx_cmd_resp *cmd = psp->cmd;
510
511 psp_memory_training_fini(psp);
512
513 amdgpu_ucode_release(&psp->sos_fw);
514 amdgpu_ucode_release(&psp->asd_fw);
515 amdgpu_ucode_release(&psp->ta_fw);
516 amdgpu_ucode_release(&psp->cap_fw);
517 amdgpu_ucode_release(&psp->toc_fw);
518
519 kfree(cmd);
520 cmd = NULL;
521
522 psp_free_shared_bufs(psp);
523
524 if (psp->km_ring.ring_mem)
525 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
526 &psp->km_ring.ring_mem_mc_addr,
527 (void **)&psp->km_ring.ring_mem);
528
529 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
530 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
531 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
532 &psp->fence_buf_mc_addr, &psp->fence_buf);
533 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
534 (void **)&psp->cmd_buf_mem);
535
536 return 0;
537}
538
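/*
 * Poll a register until the wait condition is met or adev->usec_timeout
 * expires.  With check_changed the wait succeeds as soon as the register
 * no longer equals reg_val; otherwise it succeeds once
 * (value & mask) == reg_val.
 */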
539int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
540 uint32_t reg_val, uint32_t mask, bool check_changed)
541{
542 uint32_t val;
543 int i;
544 struct amdgpu_device *adev = psp->adev;
545
546 if (psp->adev->no_hw_access)
547 return 0;
548
549 for (i = 0; i < adev->usec_timeout; i++) {
550 val = RREG32(reg_index);
551 if (check_changed) {
552 if (val != reg_val)
553 return 0;
554 } else {
555 if ((val & mask) == reg_val)
556 return 0;
557 }
558 udelay(1);
559 }
560
561 return -ETIME;
562}
563
564int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
565 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
566{
567 uint32_t val;
568 int i;
569 struct amdgpu_device *adev = psp->adev;
570
571 if (psp->adev->no_hw_access)
572 return 0;
573
574 for (i = 0; i < msec_timeout; i++) {
575 val = RREG32(reg_index);
576 if ((val & mask) == reg_val)
577 return 0;
578 msleep(1);
579 }
580
581 return -ETIME;
582}
583
584static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
585{
586 switch (cmd_id) {
587 case GFX_CMD_ID_LOAD_TA:
588 return "LOAD_TA";
589 case GFX_CMD_ID_UNLOAD_TA:
590 return "UNLOAD_TA";
591 case GFX_CMD_ID_INVOKE_CMD:
592 return "INVOKE_CMD";
593 case GFX_CMD_ID_LOAD_ASD:
594 return "LOAD_ASD";
595 case GFX_CMD_ID_SETUP_TMR:
596 return "SETUP_TMR";
597 case GFX_CMD_ID_LOAD_IP_FW:
598 return "LOAD_IP_FW";
599 case GFX_CMD_ID_DESTROY_TMR:
600 return "DESTROY_TMR";
601 case GFX_CMD_ID_SAVE_RESTORE:
602 return "SAVE_RESTORE_IP_FW";
603 case GFX_CMD_ID_SETUP_VMR:
604 return "SETUP_VMR";
605 case GFX_CMD_ID_DESTROY_VMR:
606 return "DESTROY_VMR";
607 case GFX_CMD_ID_PROG_REG:
608 return "PROG_REG";
609 case GFX_CMD_ID_GET_FW_ATTESTATION:
610 return "GET_FW_ATTESTATION";
611 case GFX_CMD_ID_LOAD_TOC:
612 return "ID_LOAD_TOC";
613 case GFX_CMD_ID_AUTOLOAD_RLC:
614 return "AUTOLOAD_RLC";
615 case GFX_CMD_ID_BOOT_CFG:
616 return "BOOT_CFG";
617 default:
618 return "UNKNOWN CMD";
619 }
620}
621
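/*
 * Submit a GFX command to the PSP and wait for completion.
 *
 * Completion is detected through the fence buffer: every submission gets a
 * new index from psp->fence_value, the index is passed to
 * psp_ring_cmd_submit() as the frame's fence value, and the PSP writes it
 * back to the fence buffer once the command has been processed.  The loop
 * below polls the fence buffer (with HDP invalidation) until the index
 * shows up, a RAS interrupt fires, or the retry budget runs out.
 */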
622static int
623psp_cmd_submit_buf(struct psp_context *psp,
624 struct amdgpu_firmware_info *ucode,
625 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
626{
627 int ret;
628 int index;
629 int timeout = 20000;
630 bool ras_intr = false;
631 bool skip_unsupport = false;
632
633 if (psp->adev->no_hw_access)
634 return 0;
635
636 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
637
638 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
639
640 index = atomic_inc_return(&psp->fence_value);
641 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
642 if (ret) {
643 atomic_dec(&psp->fence_value);
644 goto exit;
645 }
646
647 amdgpu_device_invalidate_hdp(psp->adev, NULL);
648 while (*((unsigned int *)psp->fence_buf) != index) {
649 if (--timeout == 0)
650 break;
651 /*
652			 * Don't wait for the timeout when err_event_athub occurs,
653			 * because the gpu reset thread has been triggered and the
654			 * locked resources must be released for the psp resume sequence.
655 */
656 ras_intr = amdgpu_ras_intr_triggered();
657 if (ras_intr)
658 break;
659 usleep_range(10, 100);
660 amdgpu_device_invalidate_hdp(psp->adev, NULL);
661 }
662
663 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
664 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
665 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
666
667 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
668
669	/* In some cases the psp response status is not 0 even though there was
670	 * no problem while the command was submitted; some versions of PSP FW
671	 * don't write 0 to that field.
672	 * So only print a warning instead of an error during psp
673	 * initialization to avoid breaking hw_init, and don't return
674	 * -EINVAL in that case.
675 */
676 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
677 if (ucode)
678 DRM_WARN("failed to load ucode %s(0x%X) ",
679 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
680 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
681 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
682 psp->cmd_buf_mem->resp.status);
683 /* If any firmware (including CAP) load fails under SRIOV, it should
684 * return failure to stop the VF from initializing.
685 * Also return failure in case of timeout
686 */
687 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
688 ret = -EINVAL;
689 goto exit;
690 }
691 }
692
693 if (ucode) {
694 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
695 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
696 }
697
698exit:
699 return ret;
700}
701
702static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
703{
704 struct psp_gfx_cmd_resp *cmd = psp->cmd;
705
706 mutex_lock(&psp->mutex);
707
708 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
709
710 return cmd;
711}
712
713static void release_psp_cmd_buf(struct psp_context *psp)
714{
715 mutex_unlock(&psp->mutex);
716}
717
718static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
719 struct psp_gfx_cmd_resp *cmd,
720 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
721{
722 struct amdgpu_device *adev = psp->adev;
723 uint32_t size = 0;
724 uint64_t tmr_pa = 0;
725
726 if (tmr_bo) {
727 size = amdgpu_bo_size(tmr_bo);
728 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
729 }
730
731 if (amdgpu_sriov_vf(psp->adev))
732 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
733 else
734 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
735 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
736 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
737 cmd->cmd.cmd_setup_tmr.buf_size = size;
738 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
739 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
740 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
741}
742
743static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
744 uint64_t pri_buf_mc, uint32_t size)
745{
746 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
747 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
748 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
749 cmd->cmd.cmd_load_toc.toc_size = size;
750}
751
752/* Issue a LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
753static int psp_load_toc(struct psp_context *psp,
754 uint32_t *tmr_size)
755{
756 int ret;
757 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
758
759 /* Copy toc to psp firmware private buffer */
760 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
761
762 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
763
764 ret = psp_cmd_submit_buf(psp, NULL, cmd,
765 psp->fence_buf_mc_addr);
766 if (!ret)
767 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
768
769 release_psp_cmd_buf(psp);
770
771 return ret;
772}
773
774static bool psp_boottime_tmr(struct psp_context *psp)
775{
776 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
777 case IP_VERSION(13, 0, 6):
778 return true;
779 default:
780 return false;
781 }
782}
783
784/* Set up Trusted Memory Region */
785static int psp_tmr_init(struct psp_context *psp)
786{
787 int ret = 0;
788 int tmr_size;
789 void *tmr_buf;
790 void **pptr;
791
792 /*
793	 * According to the HW engineers, the TMR address should be "naturally
794	 * aligned", i.e. the start address should be an integer multiple of the
795	 * TMR size.
796	 *
797	 * Note: this memory needs to be reserved until the driver is unloaded.
798 */
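	/* e.g. a hypothetical 64 MB TMR would start on a 64 MB boundary */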
799 tmr_size = PSP_TMR_SIZE(psp->adev);
800
801	/* For ASICs that support RLC autoload, the psp will parse the toc
802	 * and calculate the total TMR size needed
803 */
804 if (!amdgpu_sriov_vf(psp->adev) &&
805 psp->toc.start_addr &&
806 psp->toc.size_bytes &&
807 psp->fw_pri_buf) {
808 ret = psp_load_toc(psp, &tmr_size);
809 if (ret) {
810 DRM_ERROR("Failed to load toc\n");
811 return ret;
812 }
813 }
814
815 if (!psp->tmr_bo) {
816 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
817 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
818 PSP_TMR_ALIGNMENT,
819 AMDGPU_HAS_VRAM(psp->adev) ?
820 AMDGPU_GEM_DOMAIN_VRAM :
821 AMDGPU_GEM_DOMAIN_GTT,
822 &psp->tmr_bo, &psp->tmr_mc_addr,
823 pptr);
824 }
825
826 return ret;
827}
828
829static bool psp_skip_tmr(struct psp_context *psp)
830{
831 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
832 case IP_VERSION(11, 0, 9):
833 case IP_VERSION(11, 0, 7):
834 case IP_VERSION(13, 0, 2):
835 case IP_VERSION(13, 0, 6):
836 case IP_VERSION(13, 0, 10):
837 return true;
838 default:
839 return false;
840 }
841}
842
843static int psp_tmr_load(struct psp_context *psp)
844{
845 int ret;
846 struct psp_gfx_cmd_resp *cmd;
847
848 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
849 * Already set up by host driver.
850 */
851 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
852 return 0;
853
854 cmd = acquire_psp_cmd_buf(psp);
855
856 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
857 if (psp->tmr_bo)
858 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
859 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
860
861 ret = psp_cmd_submit_buf(psp, NULL, cmd,
862 psp->fence_buf_mc_addr);
863
864 release_psp_cmd_buf(psp);
865
866 return ret;
867}
868
869static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
870 struct psp_gfx_cmd_resp *cmd)
871{
872 if (amdgpu_sriov_vf(psp->adev))
873 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
874 else
875 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
876}
877
878static int psp_tmr_unload(struct psp_context *psp)
879{
880 int ret;
881 struct psp_gfx_cmd_resp *cmd;
882
883 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
884 * as TMR is not loaded at all
885 */
886 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
887 return 0;
888
889 cmd = acquire_psp_cmd_buf(psp);
890
891 psp_prep_tmr_unload_cmd_buf(psp, cmd);
892 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
893
894 ret = psp_cmd_submit_buf(psp, NULL, cmd,
895 psp->fence_buf_mc_addr);
896
897 release_psp_cmd_buf(psp);
898
899 return ret;
900}
901
902static int psp_tmr_terminate(struct psp_context *psp)
903{
904 return psp_tmr_unload(psp);
905}
906
907int psp_get_fw_attestation_records_addr(struct psp_context *psp,
908 uint64_t *output_ptr)
909{
910 int ret;
911 struct psp_gfx_cmd_resp *cmd;
912
913 if (!output_ptr)
914 return -EINVAL;
915
916 if (amdgpu_sriov_vf(psp->adev))
917 return 0;
918
919 cmd = acquire_psp_cmd_buf(psp);
920
921 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
922
923 ret = psp_cmd_submit_buf(psp, NULL, cmd,
924 psp->fence_buf_mc_addr);
925
926 if (!ret) {
927 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
928 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
929 }
930
931 release_psp_cmd_buf(psp);
932
933 return ret;
934}
935
936static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
937{
938 struct psp_context *psp = &adev->psp;
939 struct psp_gfx_cmd_resp *cmd;
940 int ret;
941
942 if (amdgpu_sriov_vf(adev))
943 return 0;
944
945 cmd = acquire_psp_cmd_buf(psp);
946
947 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
948 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
949
950 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
951 if (!ret) {
952 *boot_cfg =
953 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
954 }
955
956 release_psp_cmd_buf(psp);
957
958 return ret;
959}
960
961static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
962{
963 int ret;
964 struct psp_context *psp = &adev->psp;
965 struct psp_gfx_cmd_resp *cmd;
966
967 if (amdgpu_sriov_vf(adev))
968 return 0;
969
970 cmd = acquire_psp_cmd_buf(psp);
971
972 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
973 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
974 cmd->cmd.boot_cfg.boot_config = boot_cfg;
975 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
976
977 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
978
979 release_psp_cmd_buf(psp);
980
981 return ret;
982}
983
984static int psp_rl_load(struct amdgpu_device *adev)
985{
986 int ret;
987 struct psp_context *psp = &adev->psp;
988 struct psp_gfx_cmd_resp *cmd;
989
990 if (!is_psp_fw_valid(psp->rl))
991 return 0;
992
993 cmd = acquire_psp_cmd_buf(psp);
994
995 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
996 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
997
998 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
999 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1000 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1001 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1002 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1003
1004 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1005
1006 release_psp_cmd_buf(psp);
1007
1008 return ret;
1009}
1010
1011int psp_spatial_partition(struct psp_context *psp, int mode)
1012{
1013 struct psp_gfx_cmd_resp *cmd;
1014 int ret;
1015
1016 if (amdgpu_sriov_vf(psp->adev))
1017 return 0;
1018
1019 cmd = acquire_psp_cmd_buf(psp);
1020
1021 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1022 cmd->cmd.cmd_spatial_part.mode = mode;
1023
1024 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1025 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1026
1027 release_psp_cmd_buf(psp);
1028
1029 return ret;
1030}
1031
1032static int psp_asd_initialize(struct psp_context *psp)
1033{
1034 int ret;
1035
1036	/* If the PSP version doesn't match the ASD version, ASD loading will
1037	 * fail; add a workaround to bypass it for sriov for now.
1038	 * TODO: add a version check to make this common.
1039 */
1040 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1041 return 0;
1042
1043 psp->asd_context.mem_context.shared_mc_addr = 0;
1044 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1045 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1046
1047 ret = psp_ta_load(psp, &psp->asd_context);
1048 if (!ret)
1049 psp->asd_context.initialized = true;
1050
1051 return ret;
1052}
1053
1054static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1055 uint32_t session_id)
1056{
1057 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1058 cmd->cmd.cmd_unload_ta.session_id = session_id;
1059}
1060
1061int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1062{
1063 int ret;
1064 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1065
1066 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1067
1068 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1069
1070 context->resp_status = cmd->resp.status;
1071
1072 release_psp_cmd_buf(psp);
1073
1074 return ret;
1075}
1076
1077static int psp_asd_terminate(struct psp_context *psp)
1078{
1079 int ret;
1080
1081 if (amdgpu_sriov_vf(psp->adev))
1082 return 0;
1083
1084 if (!psp->asd_context.initialized)
1085 return 0;
1086
1087 ret = psp_ta_unload(psp, &psp->asd_context);
1088 if (!ret)
1089 psp->asd_context.initialized = false;
1090
1091 return ret;
1092}
1093
1094static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1095 uint32_t id, uint32_t value)
1096{
1097 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1098 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1099 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1100}
1101
1102int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1103 uint32_t value)
1104{
1105 struct psp_gfx_cmd_resp *cmd;
1106 int ret = 0;
1107
1108 if (reg >= PSP_REG_LAST)
1109 return -EINVAL;
1110
1111 cmd = acquire_psp_cmd_buf(psp);
1112
1113 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1114 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1115 if (ret)
1116 DRM_ERROR("PSP failed to program reg id %d", reg);
1117
1118 release_psp_cmd_buf(psp);
1119
1120 return ret;
1121}
1122
1123static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1124 uint64_t ta_bin_mc,
1125 struct ta_context *context)
1126{
1127 cmd->cmd_id = context->ta_load_type;
1128 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1129 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1130 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1131
1132 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1133 lower_32_bits(context->mem_context.shared_mc_addr);
1134 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1135 upper_32_bits(context->mem_context.shared_mc_addr);
1136 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1137}
1138
1139int psp_ta_init_shared_buf(struct psp_context *psp,
1140 struct ta_mem_context *mem_ctx)
1141{
1142 /*
1143	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1144	 * physical memory) for TA <-> host communication
1145 */
1146 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1147 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1148 AMDGPU_GEM_DOMAIN_GTT,
1149 &mem_ctx->shared_bo,
1150 &mem_ctx->shared_mc_addr,
1151 &mem_ctx->shared_buf);
1152}
1153
1154static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1155 uint32_t ta_cmd_id,
1156 uint32_t session_id)
1157{
1158 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1159 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1160 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1161}
1162
1163int psp_ta_invoke(struct psp_context *psp,
1164 uint32_t ta_cmd_id,
1165 struct ta_context *context)
1166{
1167 int ret;
1168 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1169
1170 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1171
1172 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1173 psp->fence_buf_mc_addr);
1174
1175 context->resp_status = cmd->resp.status;
1176
1177 release_psp_cmd_buf(psp);
1178
1179 return ret;
1180}
1181
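/*
 * Generic TA session flow, as used by the XGMI/RAS/HDCP/DTM wrappers in
 * this file (sketch; see psp_xgmi_initialize() for a concrete example):
 *
 *	psp_ta_init_shared_buf(psp, &ctx->mem_context);
 *	psp_ta_load(psp, ctx);			-> stores ctx->session_id
 *	psp_ta_invoke(psp, ta_cmd_id, ctx);	-> any number of times
 *	psp_ta_unload(psp, ctx);
 */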
1182int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1183{
1184 int ret;
1185 struct psp_gfx_cmd_resp *cmd;
1186
1187 cmd = acquire_psp_cmd_buf(psp);
1188
1189 psp_copy_fw(psp, context->bin_desc.start_addr,
1190 context->bin_desc.size_bytes);
1191
1192 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1193
1194 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1195 psp->fence_buf_mc_addr);
1196
1197 context->resp_status = cmd->resp.status;
1198
1199 if (!ret)
1200 context->session_id = cmd->resp.session_id;
1201
1202 release_psp_cmd_buf(psp);
1203
1204 return ret;
1205}
1206
1207int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1208{
1209 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1210}
1211
1212int psp_xgmi_terminate(struct psp_context *psp)
1213{
1214 int ret;
1215 struct amdgpu_device *adev = psp->adev;
1216
1217 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1218 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1219 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1220 adev->gmc.xgmi.connected_to_cpu))
1221 return 0;
1222
1223 if (!psp->xgmi_context.context.initialized)
1224 return 0;
1225
1226 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1227
1228 psp->xgmi_context.context.initialized = false;
1229
1230 return ret;
1231}
1232
1233int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1234{
1235 struct ta_xgmi_shared_memory *xgmi_cmd;
1236 int ret;
1237
1238 if (!psp->ta_fw ||
1239 !psp->xgmi_context.context.bin_desc.size_bytes ||
1240 !psp->xgmi_context.context.bin_desc.start_addr)
1241 return -ENOENT;
1242
1243 if (!load_ta)
1244 goto invoke;
1245
1246 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1247 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1248
1249 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1250 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 /* Load XGMI TA */
1256 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1257 if (!ret)
1258 psp->xgmi_context.context.initialized = true;
1259 else
1260 return ret;
1261
1262invoke:
1263 /* Initialize XGMI session */
1264 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1265 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1266 xgmi_cmd->flag_extend_link_record = set_extended_data;
1267 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1268
1269 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1270	/* note down the capability flag for XGMI TA */
1271 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1272
1273 return ret;
1274}
1275
1276int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1277{
1278 struct ta_xgmi_shared_memory *xgmi_cmd;
1279 int ret;
1280
1281 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1282 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1283
1284 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1285
1286 /* Invoke xgmi ta to get hive id */
1287 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1288 if (ret)
1289 return ret;
1290
1291 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1292
1293 return 0;
1294}
1295
1296int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1297{
1298 struct ta_xgmi_shared_memory *xgmi_cmd;
1299 int ret;
1300
1301 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1302 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1303
1304 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1305
1306 /* Invoke xgmi ta to get the node id */
1307 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1308 if (ret)
1309 return ret;
1310
1311 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1312
1313 return 0;
1314}
1315
1316static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1317{
1318 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1319 IP_VERSION(13, 0, 2) &&
1320 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1321 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1322 IP_VERSION(13, 0, 6);
1323}
1324
1325/*
1326 * Chips that support extended topology information require the driver to
1327 * reflect topology information in the opposite direction. This is
1328 * because the TA has already exceeded its link record limit and if the
1329 * TA holds bi-directional information, the driver would have to do
1330 * multiple fetches instead of just two.
1331 */
1332static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1333 struct psp_xgmi_node_info node_info)
1334{
1335 struct amdgpu_device *mirror_adev;
1336 struct amdgpu_hive_info *hive;
1337 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1338 uint64_t dst_node_id = node_info.node_id;
1339 uint8_t dst_num_hops = node_info.num_hops;
1340 uint8_t dst_num_links = node_info.num_links;
1341
1342 hive = amdgpu_get_xgmi_hive(psp->adev);
1343 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1344 struct psp_xgmi_topology_info *mirror_top_info;
1345 int j;
1346
1347 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1348 continue;
1349
1350 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1351 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1352 if (mirror_top_info->nodes[j].node_id != src_node_id)
1353 continue;
1354
1355 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1356 /*
1357			 * prevent re-reflection of a 0 num_links value, since the
1358			 * reflection criterion is based on num_hops (direct or
1359			 * indirect).
1360 */
1361 if (dst_num_links)
1362 mirror_top_info->nodes[j].num_links = dst_num_links;
1363
1364 break;
1365 }
1366
1367 break;
1368 }
1369
1370 amdgpu_put_xgmi_hive(hive);
1371}
1372
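/*
 * Query the XGMI topology from the TA in two steps: GET_TOPOLOGY_INFO for
 * node ids, hop counts and sharing state, then GET_PEER_LINKS (or
 * GET_EXTEND_PEER_LINKS when the TA advertises the extended peer-link
 * capability and we are not running under SRIOV) for link counts.  Where
 * required, the remote direction's num_hops/num_links are reflected into
 * the peer devices via psp_xgmi_reflect_topology_info() above.
 */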
1373int psp_xgmi_get_topology_info(struct psp_context *psp,
1374 int number_devices,
1375 struct psp_xgmi_topology_info *topology,
1376 bool get_extended_data)
1377{
1378 struct ta_xgmi_shared_memory *xgmi_cmd;
1379 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1380 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1381 int i;
1382 int ret;
1383
1384 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1385 return -EINVAL;
1386
1387 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1388 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1389 xgmi_cmd->flag_extend_link_record = get_extended_data;
1390
1391 /* Fill in the shared memory with topology information as input */
1392 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1393 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1394 topology_info_input->num_nodes = number_devices;
1395
1396 for (i = 0; i < topology_info_input->num_nodes; i++) {
1397 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1398 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1399 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1400 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1401 }
1402
1403 /* Invoke xgmi ta to get the topology information */
1404 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1405 if (ret)
1406 return ret;
1407
1408 /* Read the output topology information from the shared memory */
1409 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1410 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1411 for (i = 0; i < topology->num_nodes; i++) {
1412 /* extended data will either be 0 or equal to non-extended data */
1413 if (topology_info_output->nodes[i].num_hops)
1414 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1415
1416 /* non-extended data gets everything here so no need to update */
1417 if (!get_extended_data) {
1418 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1419 topology->nodes[i].is_sharing_enabled =
1420 topology_info_output->nodes[i].is_sharing_enabled;
1421 topology->nodes[i].sdma_engine =
1422 topology_info_output->nodes[i].sdma_engine;
1423 }
1424
1425 }
1426
1427 /* Invoke xgmi ta again to get the link information */
1428 if (psp_xgmi_peer_link_info_supported(psp)) {
1429 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1430 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1431 bool requires_reflection =
1432 (psp->xgmi_context.supports_extended_data &&
1433 get_extended_data) ||
1434 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1435 IP_VERSION(13, 0, 6);
1436 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1437 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1438
1439 /* populate the shared output buffer rather than the cmd input buffer
1440 * with node_ids as the input for GET_PEER_LINKS command execution;
1441 * the xgmi ta implementation requires this for GET_PEER_LINKS.
1442 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1443 */
1444 if (ta_port_num_support) {
1445 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1446
1447 for (i = 0; i < topology->num_nodes; i++)
1448 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1449
1450 link_extend_info_output->num_nodes = topology->num_nodes;
1451 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1452 } else {
1453 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1454
1455 for (i = 0; i < topology->num_nodes; i++)
1456 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1457
1458 link_info_output->num_nodes = topology->num_nodes;
1459 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1460 }
1461
1462 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1463 if (ret)
1464 return ret;
1465
1466 for (i = 0; i < topology->num_nodes; i++) {
1467 uint8_t node_num_links = ta_port_num_support ?
1468 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1469 /* accumulate num_links on extended data */
1470 if (get_extended_data) {
1471 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1472 } else {
1473 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1474 topology->nodes[i].num_links : node_num_links;
1475 }
1476 /* populate the connected port num info if supported and available */
1477 if (ta_port_num_support && topology->nodes[i].num_links) {
1478 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1479 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1480 }
1481
1482 /* reflect the topology information for bi-directionality */
1483 if (requires_reflection && topology->nodes[i].num_hops)
1484 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1485 }
1486 }
1487
1488 return 0;
1489}
1490
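/*
 * Push the node list in @topology to the XGMI TA so that it can program
 * the link topology; sharing is requested for every node.
 */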
1491int psp_xgmi_set_topology_info(struct psp_context *psp,
1492 int number_devices,
1493 struct psp_xgmi_topology_info *topology)
1494{
1495 struct ta_xgmi_shared_memory *xgmi_cmd;
1496 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1497 int i;
1498
1499 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1500 return -EINVAL;
1501
1502 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1503 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1504
1505 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1506 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1507 topology_info_input->num_nodes = number_devices;
1508
1509 for (i = 0; i < topology_info_input->num_nodes; i++) {
1510 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1511 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1512 topology_info_input->nodes[i].is_sharing_enabled = 1;
1513 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1514 }
1515
1516 /* Invoke xgmi ta to set topology information */
1517 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1518}
1519
1520// ras begin
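/* Translate the RAS TA status of the last command into a driver warning. */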
1521static void psp_ras_ta_check_status(struct psp_context *psp)
1522{
1523 struct ta_ras_shared_memory *ras_cmd =
1524 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1525
1526 switch (ras_cmd->ras_status) {
1527 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1528 dev_warn(psp->adev->dev,
1529 "RAS WARNING: cmd failed due to unsupported ip\n");
1530 break;
1531 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1532 dev_warn(psp->adev->dev,
1533 "RAS WARNING: cmd failed due to unsupported error injection\n");
1534 break;
1535 case TA_RAS_STATUS__SUCCESS:
1536 break;
1537 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1538 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1539 dev_warn(psp->adev->dev,
1540 "RAS WARNING: Inject error to critical region is not allowed\n");
1541 break;
1542 default:
1543 dev_warn(psp->adev->dev,
1544 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1545 break;
1546 }
1547}
1548
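/*
 * Submit a command to the RAS TA. This is a no-op under SR-IOV. On
 * success, the shared-buffer flags and the TA status are checked and
 * reported via psp_ras_ta_check_status().
 */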
1549int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1550{
1551 struct ta_ras_shared_memory *ras_cmd;
1552 int ret;
1553
1554 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1555
1556 /*
1557 * TODO: bypass the loading in sriov for now
1558 */
1559 if (amdgpu_sriov_vf(psp->adev))
1560 return 0;
1561
1562 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1563
1564 if (amdgpu_ras_intr_triggered())
1565 return ret;
1566
1567 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1568 DRM_WARN("RAS: Unsupported Interface");
1569 return -EINVAL;
1570 }
1571
1572 if (!ret) {
1573 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1574 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1575
1576 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1577 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1578 dev_warn(psp->adev->dev,
1579 "RAS internal register access blocked\n");
1580
1581 psp_ras_ta_check_status(psp);
1582 }
1583
1584 return ret;
1585}
1586
1587int psp_ras_enable_features(struct psp_context *psp,
1588 union ta_ras_cmd_input *info, bool enable)
1589{
1590 struct ta_ras_shared_memory *ras_cmd;
1591 int ret;
1592
1593 if (!psp->ras_context.context.initialized)
1594 return -EINVAL;
1595
1596 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1597 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1598
1599 if (enable)
1600 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1601 else
1602 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1603
1604 ras_cmd->ras_in_message = *info;
1605
1606 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1607 if (ret)
1608 return -EINVAL;
1609
1610 return 0;
1611}
1612
1613int psp_ras_terminate(struct psp_context *psp)
1614{
1615 int ret;
1616
1617 /*
1618 * TODO: bypass the terminate in sriov for now
1619 */
1620 if (amdgpu_sriov_vf(psp->adev))
1621 return 0;
1622
1623 if (!psp->ras_context.context.initialized)
1624 return 0;
1625
1626 ret = psp_ta_unload(psp, &psp->ras_context.context);
1627
1628 psp->ras_context.context.initialized = false;
1629
1630 return ret;
1631}
1632
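/*
 * Load the RAS TA if its firmware is available. On ASICs with dynamic
 * boot config support, GECC is enabled or disabled for the next boot
 * cycle to match the current RAS enablement state before the TA is
 * loaded.
 */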
1633int psp_ras_initialize(struct psp_context *psp)
1634{
1635 int ret;
1636 uint32_t boot_cfg = 0xFF;
1637 struct amdgpu_device *adev = psp->adev;
1638 struct ta_ras_shared_memory *ras_cmd;
1639
1640 /*
1641 * TODO: bypass the initialize in sriov for now
1642 */
1643 if (amdgpu_sriov_vf(adev))
1644 return 0;
1645
1646 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1647 !adev->psp.ras_context.context.bin_desc.start_addr) {
1648 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1649 return 0;
1650 }
1651
1652 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1653 /* query GECC enablement status from boot config:
1654 * boot_cfg: 1 = GECC is enabled, 0 = GECC is disabled
1655 */
1656 ret = psp_boot_config_get(adev, &boot_cfg);
1657 if (ret)
1658 dev_warn(adev->dev, "PSP get boot config failed\n");
1659
1660 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1661 if (!boot_cfg) {
1662 dev_info(adev->dev, "GECC is disabled\n");
1663 } else {
1664 /* disable GECC in the next boot cycle if ras is
1665 * disabled by the module parameters amdgpu_ras_enable
1666 * and/or amdgpu_ras_mask, or if the boot_config_get
1667 * call failed
1668 */
1669 ret = psp_boot_config_set(adev, 0);
1670 if (ret)
1671 dev_warn(adev->dev, "PSP set boot config failed\n");
1672 else
1673 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
1674 }
1675 } else {
1676 if (boot_cfg == 1) {
1677 dev_info(adev->dev, "GECC is enabled\n");
1678 } else {
1679 /* enable GECC in the next boot cycle if it is disabled
1680 * in the boot config, or force-enable GECC if the boot
1681 * configuration could not be read
1682 */
1683 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1684 if (ret)
1685 dev_warn(adev->dev, "PSP set boot config failed\n");
1686 else
1687 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1688 }
1689 }
1690 }
1691
1692 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1693 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1694
1695 if (!psp->ras_context.context.mem_context.shared_buf) {
1696 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1697 if (ret)
1698 return ret;
1699 }
1700
1701 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1702 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1703
1704 if (amdgpu_ras_is_poison_mode_supported(adev))
1705 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1706 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1707 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1708 ras_cmd->ras_in_message.init_flags.xcc_mask =
1709 adev->gfx.xcc_mask;
1710 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1711
1712 ret = psp_ta_load(psp, &psp->ras_context.context);
1713
1714 if (!ret && !ras_cmd->ras_status)
1715 psp->ras_context.context.initialized = true;
1716 else {
1717 if (ras_cmd->ras_status)
1718 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1719
1720 /* fail to load RAS TA */
1721 psp->ras_context.context.initialized = false;
1722 }
1723
1724 return ret;
1725}
1726
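/*
 * Ask the RAS TA to inject an error. @instance_mask selects the target
 * instances and is converted into a device mask that is packed into
 * sub_block_index for backward compatibility.
 */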
1727int psp_ras_trigger_error(struct psp_context *psp,
1728 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1729{
1730 struct ta_ras_shared_memory *ras_cmd;
1731 struct amdgpu_device *adev = psp->adev;
1732 int ret;
1733 uint32_t dev_mask;
1734
1735 if (!psp->ras_context.context.initialized)
1736 return -EINVAL;
1737
1738 switch (info->block_id) {
1739 case TA_RAS_BLOCK__GFX:
1740 dev_mask = GET_MASK(GC, instance_mask);
1741 break;
1742 case TA_RAS_BLOCK__SDMA:
1743 dev_mask = GET_MASK(SDMA0, instance_mask);
1744 break;
1745 case TA_RAS_BLOCK__VCN:
1746 case TA_RAS_BLOCK__JPEG:
1747 dev_mask = GET_MASK(VCN, instance_mask);
1748 break;
1749 default:
1750 dev_mask = instance_mask;
1751 break;
1752 }
1753
1754 /* reuse sub_block_index for backward compatibility */
1755 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1756 dev_mask &= AMDGPU_RAS_INST_MASK;
1757 info->sub_block_index |= dev_mask;
1758
1759 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1760 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1761
1762 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1763 ras_cmd->ras_in_message.trigger_error = *info;
1764
1765 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1766 if (ret)
1767 return -EINVAL;
1768
1769 /* If err_event_athub occurs, the error injection was successful, but
1770 * the return status from the TA is no longer reliable
1771 */
1772 if (amdgpu_ras_intr_triggered())
1773 return 0;
1774
1775 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1776 return -EACCES;
1777 else if (ras_cmd->ras_status)
1778 return -EINVAL;
1779
1780 return 0;
1781}
1782// ras end
1783
1784// HDCP start
1785static int psp_hdcp_initialize(struct psp_context *psp)
1786{
1787 int ret;
1788
1789 /*
1790 * TODO: bypass the initialize in sriov for now
1791 */
1792 if (amdgpu_sriov_vf(psp->adev))
1793 return 0;
1794
1795 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1796 !psp->hdcp_context.context.bin_desc.start_addr) {
1797 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1798 return 0;
1799 }
1800
1801 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1802 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1803
1804 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1805 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1806 if (ret)
1807 return ret;
1808 }
1809
1810 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1811 if (!ret) {
1812 psp->hdcp_context.context.initialized = true;
1813 mutex_init(&psp->hdcp_context.mutex);
1814 }
1815
1816 return ret;
1817}
1818
1819int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1820{
1821 /*
1822 * TODO: bypass the loading in sriov for now
1823 */
1824 if (amdgpu_sriov_vf(psp->adev))
1825 return 0;
1826
1827 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1828}
1829
1830static int psp_hdcp_terminate(struct psp_context *psp)
1831{
1832 int ret;
1833
1834 /*
1835 * TODO: bypass the terminate in sriov for now
1836 */
1837 if (amdgpu_sriov_vf(psp->adev))
1838 return 0;
1839
1840 if (!psp->hdcp_context.context.initialized)
1841 return 0;
1842
1843 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1844
1845 psp->hdcp_context.context.initialized = false;
1846
1847 return ret;
1848}
1849// HDCP end
1850
1851// DTM start
1852static int psp_dtm_initialize(struct psp_context *psp)
1853{
1854 int ret;
1855
1856 /*
1857 * TODO: bypass the initialize in sriov for now
1858 */
1859 if (amdgpu_sriov_vf(psp->adev))
1860 return 0;
1861
1862 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1863 !psp->dtm_context.context.bin_desc.start_addr) {
1864 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1865 return 0;
1866 }
1867
1868 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1869 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1870
1871 if (!psp->dtm_context.context.mem_context.shared_buf) {
1872 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1873 if (ret)
1874 return ret;
1875 }
1876
1877 ret = psp_ta_load(psp, &psp->dtm_context.context);
1878 if (!ret) {
1879 psp->dtm_context.context.initialized = true;
1880 mutex_init(&psp->dtm_context.mutex);
1881 }
1882
1883 return ret;
1884}
1885
1886int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1887{
1888 /*
1889 * TODO: bypass the loading in sriov for now
1890 */
1891 if (amdgpu_sriov_vf(psp->adev))
1892 return 0;
1893
1894 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1895}
1896
1897static int psp_dtm_terminate(struct psp_context *psp)
1898{
1899 int ret;
1900
1901 /*
1902 * TODO: bypass the terminate in sriov for now
1903 */
1904 if (amdgpu_sriov_vf(psp->adev))
1905 return 0;
1906
1907 if (!psp->dtm_context.context.initialized)
1908 return 0;
1909
1910 ret = psp_ta_unload(psp, &psp->dtm_context.context);
1911
1912 psp->dtm_context.context.initialized = false;
1913
1914 return ret;
1915}
1916// DTM end
1917
1918// RAP start
1919static int psp_rap_initialize(struct psp_context *psp)
1920{
1921 int ret;
1922 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1923
1924 /*
1925 * TODO: bypass the initialize in sriov for now
1926 */
1927 if (amdgpu_sriov_vf(psp->adev))
1928 return 0;
1929
1930 if (!psp->rap_context.context.bin_desc.size_bytes ||
1931 !psp->rap_context.context.bin_desc.start_addr) {
1932 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1933 return 0;
1934 }
1935
1936 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1937 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1938
1939 if (!psp->rap_context.context.mem_context.shared_buf) {
1940 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1941 if (ret)
1942 return ret;
1943 }
1944
1945 ret = psp_ta_load(psp, &psp->rap_context.context);
1946 if (!ret) {
1947 psp->rap_context.context.initialized = true;
1948 mutex_init(&psp->rap_context.mutex);
1949 } else
1950 return ret;
1951
1952 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
1953 if (ret || status != TA_RAP_STATUS__SUCCESS) {
1954 psp_rap_terminate(psp);
1955 /* free rap shared memory */
1956 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
1957
1958 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
1959 ret, status);
1960
1961 return ret;
1962 }
1963
1964 return 0;
1965}
1966
1967static int psp_rap_terminate(struct psp_context *psp)
1968{
1969 int ret;
1970
1971 if (!psp->rap_context.context.initialized)
1972 return 0;
1973
1974 ret = psp_ta_unload(psp, &psp->rap_context.context);
1975
1976 psp->rap_context.context.initialized = false;
1977
1978 return ret;
1979}
1980
1981int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
1982{
1983 struct ta_rap_shared_memory *rap_cmd;
1984 int ret = 0;
1985
1986 if (!psp->rap_context.context.initialized)
1987 return 0;
1988
1989 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
1990 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
1991 return -EINVAL;
1992
1993 mutex_lock(&psp->rap_context.mutex);
1994
1995 rap_cmd = (struct ta_rap_shared_memory *)
1996 psp->rap_context.context.mem_context.shared_buf;
1997 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
1998
1999 rap_cmd->cmd_id = ta_cmd_id;
2000 rap_cmd->validation_method_id = METHOD_A;
2001
2002 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2003 if (ret)
2004 goto out_unlock;
2005
2006 if (status)
2007 *status = rap_cmd->rap_status;
2008
2009out_unlock:
2010 mutex_unlock(&psp->rap_context.mutex);
2011
2012 return ret;
2013}
2014// RAP end
2015
2016/* securedisplay start */
2017static int psp_securedisplay_initialize(struct psp_context *psp)
2018{
2019 int ret;
2020 struct ta_securedisplay_cmd *securedisplay_cmd;
2021
2022 /*
2023 * TODO: bypass the initialize in sriov for now
2024 */
2025 if (amdgpu_sriov_vf(psp->adev))
2026 return 0;
2027
2028 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2029 !psp->securedisplay_context.context.bin_desc.start_addr) {
2030 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2031 return 0;
2032 }
2033
2034 psp->securedisplay_context.context.mem_context.shared_mem_size =
2035 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2036 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2037
2038 if (!psp->securedisplay_context.context.initialized) {
2039 ret = psp_ta_init_shared_buf(psp,
2040 &psp->securedisplay_context.context.mem_context);
2041 if (ret)
2042 return ret;
2043 }
2044
2045 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2046 if (!ret) {
2047 psp->securedisplay_context.context.initialized = true;
2048 mutex_init(&psp->securedisplay_context.mutex);
2049 } else
2050 return ret;
2051
2052 mutex_lock(&psp->securedisplay_context.mutex);
2053
2054 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2055 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2056
2057 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2058
2059 mutex_unlock(&psp->securedisplay_context.mutex);
2060
2061 if (ret) {
2062 psp_securedisplay_terminate(psp);
2063 /* free securedisplay shared memory */
2064 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2065 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2066 return -EINVAL;
2067 }
2068
2069 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2070 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2071 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2072 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2073 /* don't try again */
2074 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2075 }
2076
2077 return 0;
2078}
2079
2080static int psp_securedisplay_terminate(struct psp_context *psp)
2081{
2082 int ret;
2083
2084 /*
2085 * TODO: bypass the terminate in sriov for now
2086 */
2087 if (amdgpu_sriov_vf(psp->adev))
2088 return 0;
2089
2090 if (!psp->securedisplay_context.context.initialized)
2091 return 0;
2092
2093 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2094
2095 psp->securedisplay_context.context.initialized = false;
2096
2097 return ret;
2098}
2099
2100int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2101{
2102 int ret;
2103
2104 if (!psp->securedisplay_context.context.initialized)
2105 return -EINVAL;
2106
2107 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2108 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2109 return -EINVAL;
2110
2111 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2112
2113 return ret;
2114}
2115/* SECUREDISPLAY end */
2116
2117int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2118{
2119 struct psp_context *psp = &adev->psp;
2120 int ret = 0;
2121
2122 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2123 ret = psp->funcs->wait_for_bootloader(psp);
2124
2125 return ret;
2126}
2127
2128int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
2129{
2130 struct psp_context *psp = &adev->psp;
2131 int ret = 0;
2132
2133 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2134 return 0;
2135
2136 if (psp->funcs &&
2137 psp->funcs->query_boot_status)
2138 ret = psp->funcs->query_boot_status(psp);
2139
2140 return ret;
2141}
2142
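/*
 * Bring up the PSP: load whichever bootloader components are present
 * (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV, SOS), create
 * the KM ring, initialize the TMR if it is not boot-time reserved, load
 * the SMU firmware first when cstate management is centralized in the
 * PMFW, and finally load the TMR.
 */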
2143static int psp_hw_start(struct psp_context *psp)
2144{
2145 struct amdgpu_device *adev = psp->adev;
2146 int ret;
2147
2148 if (!amdgpu_sriov_vf(adev)) {
2149 if ((is_psp_fw_valid(psp->kdb)) &&
2150 (psp->funcs->bootloader_load_kdb != NULL)) {
2151 ret = psp_bootloader_load_kdb(psp);
2152 if (ret) {
2153 DRM_ERROR("PSP load kdb failed!\n");
2154 return ret;
2155 }
2156 }
2157
2158 if ((is_psp_fw_valid(psp->spl)) &&
2159 (psp->funcs->bootloader_load_spl != NULL)) {
2160 ret = psp_bootloader_load_spl(psp);
2161 if (ret) {
2162 DRM_ERROR("PSP load spl failed!\n");
2163 return ret;
2164 }
2165 }
2166
2167 if ((is_psp_fw_valid(psp->sys)) &&
2168 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2169 ret = psp_bootloader_load_sysdrv(psp);
2170 if (ret) {
2171 DRM_ERROR("PSP load sys drv failed!\n");
2172 return ret;
2173 }
2174 }
2175
2176 if ((is_psp_fw_valid(psp->soc_drv)) &&
2177 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2178 ret = psp_bootloader_load_soc_drv(psp);
2179 if (ret) {
2180 DRM_ERROR("PSP load soc drv failed!\n");
2181 return ret;
2182 }
2183 }
2184
2185 if ((is_psp_fw_valid(psp->intf_drv)) &&
2186 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2187 ret = psp_bootloader_load_intf_drv(psp);
2188 if (ret) {
2189 DRM_ERROR("PSP load intf drv failed!\n");
2190 return ret;
2191 }
2192 }
2193
2194 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2195 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2196 ret = psp_bootloader_load_dbg_drv(psp);
2197 if (ret) {
2198 DRM_ERROR("PSP load dbg drv failed!\n");
2199 return ret;
2200 }
2201 }
2202
2203 if ((is_psp_fw_valid(psp->ras_drv)) &&
2204 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2205 ret = psp_bootloader_load_ras_drv(psp);
2206 if (ret) {
2207 DRM_ERROR("PSP load ras_drv failed!\n");
2208 return ret;
2209 }
2210 }
2211
2212 if ((is_psp_fw_valid(psp->sos)) &&
2213 (psp->funcs->bootloader_load_sos != NULL)) {
2214 ret = psp_bootloader_load_sos(psp);
2215 if (ret) {
2216 DRM_ERROR("PSP load sos failed!\n");
2217 return ret;
2218 }
2219 }
2220 }
2221
2222 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2223 if (ret) {
2224 DRM_ERROR("PSP create ring failed!\n");
2225 return ret;
2226 }
2227
2228 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2229 goto skip_pin_bo;
2230
2231 if (!psp_boottime_tmr(psp)) {
2232 ret = psp_tmr_init(psp);
2233 if (ret) {
2234 DRM_ERROR("PSP tmr init failed!\n");
2235 return ret;
2236 }
2237 }
2238
2239skip_pin_bo:
2240 /*
2241 * For ASICs with DF Cstate management centralized
2242 * in the PMFW, TMR setup should be performed after
2243 * the PMFW is loaded and before other non-psp firmware is loaded.
2244 */
2245 if (psp->pmfw_centralized_cstate_management) {
2246 ret = psp_load_smu_fw(psp);
2247 if (ret)
2248 return ret;
2249 }
2250
2251 ret = psp_tmr_load(psp);
2252 if (ret) {
2253 DRM_ERROR("PSP load tmr failed!\n");
2254 return ret;
2255 }
2256
2257 return 0;
2258}
2259
2260static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2261 enum psp_gfx_fw_type *type)
2262{
2263 switch (ucode->ucode_id) {
2264 case AMDGPU_UCODE_ID_CAP:
2265 *type = GFX_FW_TYPE_CAP;
2266 break;
2267 case AMDGPU_UCODE_ID_SDMA0:
2268 *type = GFX_FW_TYPE_SDMA0;
2269 break;
2270 case AMDGPU_UCODE_ID_SDMA1:
2271 *type = GFX_FW_TYPE_SDMA1;
2272 break;
2273 case AMDGPU_UCODE_ID_SDMA2:
2274 *type = GFX_FW_TYPE_SDMA2;
2275 break;
2276 case AMDGPU_UCODE_ID_SDMA3:
2277 *type = GFX_FW_TYPE_SDMA3;
2278 break;
2279 case AMDGPU_UCODE_ID_SDMA4:
2280 *type = GFX_FW_TYPE_SDMA4;
2281 break;
2282 case AMDGPU_UCODE_ID_SDMA5:
2283 *type = GFX_FW_TYPE_SDMA5;
2284 break;
2285 case AMDGPU_UCODE_ID_SDMA6:
2286 *type = GFX_FW_TYPE_SDMA6;
2287 break;
2288 case AMDGPU_UCODE_ID_SDMA7:
2289 *type = GFX_FW_TYPE_SDMA7;
2290 break;
2291 case AMDGPU_UCODE_ID_CP_MES:
2292 *type = GFX_FW_TYPE_CP_MES;
2293 break;
2294 case AMDGPU_UCODE_ID_CP_MES_DATA:
2295 *type = GFX_FW_TYPE_MES_STACK;
2296 break;
2297 case AMDGPU_UCODE_ID_CP_MES1:
2298 *type = GFX_FW_TYPE_CP_MES_KIQ;
2299 break;
2300 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2301 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2302 break;
2303 case AMDGPU_UCODE_ID_CP_CE:
2304 *type = GFX_FW_TYPE_CP_CE;
2305 break;
2306 case AMDGPU_UCODE_ID_CP_PFP:
2307 *type = GFX_FW_TYPE_CP_PFP;
2308 break;
2309 case AMDGPU_UCODE_ID_CP_ME:
2310 *type = GFX_FW_TYPE_CP_ME;
2311 break;
2312 case AMDGPU_UCODE_ID_CP_MEC1:
2313 *type = GFX_FW_TYPE_CP_MEC;
2314 break;
2315 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2316 *type = GFX_FW_TYPE_CP_MEC_ME1;
2317 break;
2318 case AMDGPU_UCODE_ID_CP_MEC2:
2319 *type = GFX_FW_TYPE_CP_MEC;
2320 break;
2321 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2322 *type = GFX_FW_TYPE_CP_MEC_ME2;
2323 break;
2324 case AMDGPU_UCODE_ID_RLC_P:
2325 *type = GFX_FW_TYPE_RLC_P;
2326 break;
2327 case AMDGPU_UCODE_ID_RLC_V:
2328 *type = GFX_FW_TYPE_RLC_V;
2329 break;
2330 case AMDGPU_UCODE_ID_RLC_G:
2331 *type = GFX_FW_TYPE_RLC_G;
2332 break;
2333 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2334 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2335 break;
2336 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2337 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2338 break;
2339 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2340 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2341 break;
2342 case AMDGPU_UCODE_ID_RLC_IRAM:
2343 *type = GFX_FW_TYPE_RLC_IRAM;
2344 break;
2345 case AMDGPU_UCODE_ID_RLC_DRAM:
2346 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2347 break;
2348 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2349 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2350 break;
2351 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2352 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2353 break;
2354 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2355 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2356 break;
2357 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2358 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2359 break;
2360 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2361 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2362 break;
2363 case AMDGPU_UCODE_ID_SMC:
2364 *type = GFX_FW_TYPE_SMU;
2365 break;
2366 case AMDGPU_UCODE_ID_PPTABLE:
2367 *type = GFX_FW_TYPE_PPTABLE;
2368 break;
2369 case AMDGPU_UCODE_ID_UVD:
2370 *type = GFX_FW_TYPE_UVD;
2371 break;
2372 case AMDGPU_UCODE_ID_UVD1:
2373 *type = GFX_FW_TYPE_UVD1;
2374 break;
2375 case AMDGPU_UCODE_ID_VCE:
2376 *type = GFX_FW_TYPE_VCE;
2377 break;
2378 case AMDGPU_UCODE_ID_VCN:
2379 *type = GFX_FW_TYPE_VCN;
2380 break;
2381 case AMDGPU_UCODE_ID_VCN1:
2382 *type = GFX_FW_TYPE_VCN1;
2383 break;
2384 case AMDGPU_UCODE_ID_DMCU_ERAM:
2385 *type = GFX_FW_TYPE_DMCU_ERAM;
2386 break;
2387 case AMDGPU_UCODE_ID_DMCU_INTV:
2388 *type = GFX_FW_TYPE_DMCU_ISR;
2389 break;
2390 case AMDGPU_UCODE_ID_VCN0_RAM:
2391 *type = GFX_FW_TYPE_VCN0_RAM;
2392 break;
2393 case AMDGPU_UCODE_ID_VCN1_RAM:
2394 *type = GFX_FW_TYPE_VCN1_RAM;
2395 break;
2396 case AMDGPU_UCODE_ID_DMCUB:
2397 *type = GFX_FW_TYPE_DMUB;
2398 break;
2399 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2400 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2401 break;
2402 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2403 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2404 break;
2405 case AMDGPU_UCODE_ID_IMU_I:
2406 *type = GFX_FW_TYPE_IMU_I;
2407 break;
2408 case AMDGPU_UCODE_ID_IMU_D:
2409 *type = GFX_FW_TYPE_IMU_D;
2410 break;
2411 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2412 *type = GFX_FW_TYPE_RS64_PFP;
2413 break;
2414 case AMDGPU_UCODE_ID_CP_RS64_ME:
2415 *type = GFX_FW_TYPE_RS64_ME;
2416 break;
2417 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2418 *type = GFX_FW_TYPE_RS64_MEC;
2419 break;
2420 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2421 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2422 break;
2423 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2424 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2425 break;
2426 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2427 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2428 break;
2429 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2430 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2431 break;
2432 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2433 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2434 break;
2435 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2436 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2437 break;
2438 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2439 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2440 break;
2441 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2442 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2443 break;
2444 case AMDGPU_UCODE_ID_VPE_CTX:
2445 *type = GFX_FW_TYPE_VPEC_FW1;
2446 break;
2447 case AMDGPU_UCODE_ID_VPE_CTL:
2448 *type = GFX_FW_TYPE_VPEC_FW2;
2449 break;
2450 case AMDGPU_UCODE_ID_VPE:
2451 *type = GFX_FW_TYPE_VPE;
2452 break;
2453 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2454 *type = GFX_FW_TYPE_UMSCH_UCODE;
2455 break;
2456 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2457 *type = GFX_FW_TYPE_UMSCH_DATA;
2458 break;
2459 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2460 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2461 break;
2462 case AMDGPU_UCODE_ID_P2S_TABLE:
2463 *type = GFX_FW_TYPE_P2S_TABLE;
2464 break;
2465 case AMDGPU_UCODE_ID_MAXIMUM:
2466 default:
2467 return -EINVAL;
2468 }
2469
2470 return 0;
2471}
2472
2473static void psp_print_fw_hdr(struct psp_context *psp,
2474 struct amdgpu_firmware_info *ucode)
2475{
2476 struct amdgpu_device *adev = psp->adev;
2477 struct common_firmware_header *hdr;
2478
2479 switch (ucode->ucode_id) {
2480 case AMDGPU_UCODE_ID_SDMA0:
2481 case AMDGPU_UCODE_ID_SDMA1:
2482 case AMDGPU_UCODE_ID_SDMA2:
2483 case AMDGPU_UCODE_ID_SDMA3:
2484 case AMDGPU_UCODE_ID_SDMA4:
2485 case AMDGPU_UCODE_ID_SDMA5:
2486 case AMDGPU_UCODE_ID_SDMA6:
2487 case AMDGPU_UCODE_ID_SDMA7:
2488 hdr = (struct common_firmware_header *)
2489 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2490 amdgpu_ucode_print_sdma_hdr(hdr);
2491 break;
2492 case AMDGPU_UCODE_ID_CP_CE:
2493 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2494 amdgpu_ucode_print_gfx_hdr(hdr);
2495 break;
2496 case AMDGPU_UCODE_ID_CP_PFP:
2497 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2498 amdgpu_ucode_print_gfx_hdr(hdr);
2499 break;
2500 case AMDGPU_UCODE_ID_CP_ME:
2501 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2502 amdgpu_ucode_print_gfx_hdr(hdr);
2503 break;
2504 case AMDGPU_UCODE_ID_CP_MEC1:
2505 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2506 amdgpu_ucode_print_gfx_hdr(hdr);
2507 break;
2508 case AMDGPU_UCODE_ID_RLC_G:
2509 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2510 amdgpu_ucode_print_rlc_hdr(hdr);
2511 break;
2512 case AMDGPU_UCODE_ID_SMC:
2513 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2514 amdgpu_ucode_print_smc_hdr(hdr);
2515 break;
2516 default:
2517 break;
2518 }
2519}
2520
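/* Fill a LOAD_IP_FW command with the ucode's GPU address, size and type. */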
2521static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
2522 struct psp_gfx_cmd_resp *cmd)
2523{
2524 int ret;
2525 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2526
2527 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2528 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2529 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2530 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2531
2532 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2533 if (ret)
2534 DRM_ERROR("Unknown firmware type\n");
2535
2536 return ret;
2537}
2538
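/* Build a LOAD_IP_FW command for @ucode and submit it on the KM ring. */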
2539int psp_execute_ip_fw_load(struct psp_context *psp,
2540 struct amdgpu_firmware_info *ucode)
2541{
2542 int ret = 0;
2543 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2544
2545 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
2546 if (!ret) {
2547 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2548 psp->fence_buf_mc_addr);
2549 }
2550
2551 release_psp_cmd_buf(psp);
2552
2553 return ret;
2554}
2555
2556static int psp_load_p2s_table(struct psp_context *psp)
2557{
2558 int ret;
2559 struct amdgpu_device *adev = psp->adev;
2560 struct amdgpu_firmware_info *ucode =
2561 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2562
2563 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2564 return 0;
2565
2566 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2567 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2568 0x0036003C;
2569 if (psp->sos.fw_version < supp_vers)
2570 return 0;
2571 }
2572
2573 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2574 return 0;
2575
2576 ret = psp_execute_ip_fw_load(psp, ucode);
2577
2578 return ret;
2579}
2580
2581static int psp_load_smu_fw(struct psp_context *psp)
2582{
2583 int ret;
2584 struct amdgpu_device *adev = psp->adev;
2585 struct amdgpu_firmware_info *ucode =
2586 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2587 struct amdgpu_ras *ras = psp->ras_context.ras;
2588
2589 /*
2590 * Skip SMU FW reloading when BACO is used for runtime PM only,
2591 * as the SMU is always alive in that case.
2592 */
2593 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2594 return 0;
2595
2596 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2597 return 0;
2598
2599 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2600 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2601 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2602 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2603 if (ret)
2604 DRM_WARN("Failed to set MP1 state prepare for reload\n");
2605 }
2606
2607 ret = psp_execute_ip_fw_load(psp, ucode);
2608
2609 if (ret)
2610 DRM_ERROR("PSP load smu failed!\n");
2611
2612 return ret;
2613}
2614
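/*
 * Return true when a firmware image should not be loaded through the PSP
 * here: empty images, the P2S table, SMU firmware that is handled
 * elsewhere, images skipped under SR-IOV, and MEC JT images when RLC
 * autoload is enabled.
 */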
2615static bool fw_load_skip_check(struct psp_context *psp,
2616 struct amdgpu_firmware_info *ucode)
2617{
2618 if (!ucode->fw || !ucode->ucode_size)
2619 return true;
2620
2621 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2622 return true;
2623
2624 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2625 (psp_smu_reload_quirk(psp) ||
2626 psp->autoload_supported ||
2627 psp->pmfw_centralized_cstate_management))
2628 return true;
2629
2630 if (amdgpu_sriov_vf(psp->adev) &&
2631 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2632 return true;
2633
2634 if (psp->autoload_supported &&
2635 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2636 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2637 /* skip mec JT when autoload is enabled */
2638 return true;
2639
2640 return false;
2641}
2642
2643int psp_load_fw_list(struct psp_context *psp,
2644 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2645{
2646 int ret = 0, i;
2647 struct amdgpu_firmware_info *ucode;
2648
2649 for (i = 0; i < ucode_count; ++i) {
2650 ucode = ucode_list[i];
2651 psp_print_fw_hdr(psp, ucode);
2652 ret = psp_execute_ip_fw_load(psp, ucode);
2653 if (ret)
2654 return ret;
2655 }
2656 return ret;
2657}
2658
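/*
 * Load all non-PSP firmware images through the PSP, honoring the skip
 * rules in fw_load_skip_check(), and kick off RLC autoload once the
 * RLC_G image (or the SR-IOV autoload image) has been submitted.
 */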
2659static int psp_load_non_psp_fw(struct psp_context *psp)
2660{
2661 int i, ret;
2662 struct amdgpu_firmware_info *ucode;
2663 struct amdgpu_device *adev = psp->adev;
2664
2665 if (psp->autoload_supported &&
2666 !psp->pmfw_centralized_cstate_management) {
2667 ret = psp_load_smu_fw(psp);
2668 if (ret)
2669 return ret;
2670 }
2671
2672 /* Load P2S table first if it's available */
2673 psp_load_p2s_table(psp);
2674
2675 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2676 ucode = &adev->firmware.ucode[i];
2677
2678 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2679 !fw_load_skip_check(psp, ucode)) {
2680 ret = psp_load_smu_fw(psp);
2681 if (ret)
2682 return ret;
2683 continue;
2684 }
2685
2686 if (fw_load_skip_check(psp, ucode))
2687 continue;
2688
2689 if (psp->autoload_supported &&
2690 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2691 IP_VERSION(11, 0, 7) ||
2692 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2693 IP_VERSION(11, 0, 11) ||
2694 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2695 IP_VERSION(11, 0, 12)) &&
2696 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2697 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2698 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2699 /* PSP only receives one SDMA fw for sienna_cichlid,
2700 * as all four sdma fw images are the same
2701 */
2702 continue;
2703
2704 psp_print_fw_hdr(psp, ucode);
2705
2706 ret = psp_execute_ip_fw_load(psp, ucode);
2707 if (ret)
2708 return ret;
2709
2710 /* Start rlc autoload after psp has received all the gfx firmware */
2711 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2712 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2713 ret = psp_rlc_autoload_start(psp);
2714 if (ret) {
2715 DRM_ERROR("Failed to start rlc autoload\n");
2716 return ret;
2717 }
2718 }
2719 }
2720
2721 return 0;
2722}
2723
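/*
 * PSP firmware loading sequence used at hw_init time: start the PSP,
 * load all non-PSP firmware through it, load the ASD and RL images, and
 * initialize the optional TAs when TA firmware is available.
 */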
2724static int psp_load_fw(struct amdgpu_device *adev)
2725{
2726 int ret;
2727 struct psp_context *psp = &adev->psp;
2728
2729 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2730 /* should not destroy ring, only stop */
2731 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2732 } else {
2733 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2734
2735 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2736 if (ret) {
2737 DRM_ERROR("PSP ring init failed!\n");
2738 goto failed;
2739 }
2740 }
2741
2742 ret = psp_hw_start(psp);
2743 if (ret)
2744 goto failed;
2745
2746 ret = psp_load_non_psp_fw(psp);
2747 if (ret)
2748 goto failed1;
2749
2750 ret = psp_asd_initialize(psp);
2751 if (ret) {
2752 DRM_ERROR("PSP load asd failed!\n");
2753 goto failed1;
2754 }
2755
2756 ret = psp_rl_load(adev);
2757 if (ret) {
2758 DRM_ERROR("PSP load RL failed!\n");
2759 goto failed1;
2760 }
2761
2762 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2763 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2764 ret = psp_xgmi_initialize(psp, false, true);
2765 /* Warn on XGMI session initialization failure
2766 * instead of stopping driver initialization
2767 */
2768 if (ret)
2769 dev_err(psp->adev->dev,
2770 "XGMI: Failed to initialize XGMI session\n");
2771 }
2772 }
2773
2774 if (psp->ta_fw) {
2775 ret = psp_ras_initialize(psp);
2776 if (ret)
2777 dev_err(psp->adev->dev,
2778 "RAS: Failed to initialize RAS\n");
2779
2780 ret = psp_hdcp_initialize(psp);
2781 if (ret)
2782 dev_err(psp->adev->dev,
2783 "HDCP: Failed to initialize HDCP\n");
2784
2785 ret = psp_dtm_initialize(psp);
2786 if (ret)
2787 dev_err(psp->adev->dev,
2788 "DTM: Failed to initialize DTM\n");
2789
2790 ret = psp_rap_initialize(psp);
2791 if (ret)
2792 dev_err(psp->adev->dev,
2793 "RAP: Failed to initialize RAP\n");
2794
2795 ret = psp_securedisplay_initialize(psp);
2796 if (ret)
2797 dev_err(psp->adev->dev,
2798 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2799 }
2800
2801 return 0;
2802
2803failed1:
2804 psp_free_shared_bufs(psp);
2805failed:
2806 /*
2807 * all cleanup jobs (xgmi terminate, ras terminate,
2808 * ring destroy, cmd/fence/fw buffers destroy,
2809 * psp->cmd destroy) are delayed to psp_hw_fini
2810 */
2811 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2812 return ret;
2813}
2814
2815static int psp_hw_init(void *handle)
2816{
2817 int ret;
2818 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2819
2820 mutex_lock(&adev->firmware.mutex);
2821 /*
2822 * This sequence is used only once, during hw_init; it is not
2823 * needed on resume.
2824 */
2825 ret = amdgpu_ucode_init_bo(adev);
2826 if (ret)
2827 goto failed;
2828
2829 ret = psp_load_fw(adev);
2830 if (ret) {
2831 DRM_ERROR("PSP firmware loading failed\n");
2832 goto failed;
2833 }
2834
2835 mutex_unlock(&adev->firmware.mutex);
2836 return 0;
2837
2838failed:
2839 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2840 mutex_unlock(&adev->firmware.mutex);
2841 return -EINVAL;
2842}
2843
2844static int psp_hw_fini(void *handle)
2845{
2846 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2847 struct psp_context *psp = &adev->psp;
2848
2849 if (psp->ta_fw) {
2850 psp_ras_terminate(psp);
2851 psp_securedisplay_terminate(psp);
2852 psp_rap_terminate(psp);
2853 psp_dtm_terminate(psp);
2854 psp_hdcp_terminate(psp);
2855
2856 if (adev->gmc.xgmi.num_physical_nodes > 1)
2857 psp_xgmi_terminate(psp);
2858 }
2859
2860 psp_asd_terminate(psp);
2861 psp_tmr_terminate(psp);
2862
2863 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2864
2865 return 0;
2866}
2867
2868static int psp_suspend(void *handle)
2869{
2870 int ret = 0;
2871 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2872 struct psp_context *psp = &adev->psp;
2873
2874 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2875 psp->xgmi_context.context.initialized) {
2876 ret = psp_xgmi_terminate(psp);
2877 if (ret) {
2878 DRM_ERROR("Failed to terminate xgmi ta\n");
2879 goto out;
2880 }
2881 }
2882
2883 if (psp->ta_fw) {
2884 ret = psp_ras_terminate(psp);
2885 if (ret) {
2886 DRM_ERROR("Failed to terminate ras ta\n");
2887 goto out;
2888 }
2889 ret = psp_hdcp_terminate(psp);
2890 if (ret) {
2891 DRM_ERROR("Failed to terminate hdcp ta\n");
2892 goto out;
2893 }
2894 ret = psp_dtm_terminate(psp);
2895 if (ret) {
2896 DRM_ERROR("Failed to terminate dtm ta\n");
2897 goto out;
2898 }
2899 ret = psp_rap_terminate(psp);
2900 if (ret) {
2901 DRM_ERROR("Failed to terminate rap ta\n");
2902 goto out;
2903 }
2904 ret = psp_securedisplay_terminate(psp);
2905 if (ret) {
2906 DRM_ERROR("Failed to terminate securedisplay ta\n");
2907 goto out;
2908 }
2909 }
2910
2911 ret = psp_asd_terminate(psp);
2912 if (ret) {
2913 DRM_ERROR("Failed to terminate asd\n");
2914 goto out;
2915 }
2916
2917 ret = psp_tmr_terminate(psp);
2918 if (ret) {
2919 DRM_ERROR("Failed to terminate tmr\n");
2920 goto out;
2921 }
2922
2923 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2924 if (ret)
2925 DRM_ERROR("PSP ring stop failed\n");
2926
2927out:
2928 return ret;
2929}
2930
2931static int psp_resume(void *handle)
2932{
2933 int ret;
2934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2935 struct psp_context *psp = &adev->psp;
2936
2937 DRM_INFO("PSP is resuming...\n");
2938
2939 if (psp->mem_train_ctx.enable_mem_training) {
2940 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2941 if (ret) {
2942 DRM_ERROR("Failed to process memory training!\n");
2943 return ret;
2944 }
2945 }
2946
2947 mutex_lock(&adev->firmware.mutex);
2948
2949 ret = psp_hw_start(psp);
2950 if (ret)
2951 goto failed;
2952
2953 ret = psp_load_non_psp_fw(psp);
2954 if (ret)
2955 goto failed;
2956
2957 ret = psp_asd_initialize(psp);
2958 if (ret) {
2959 DRM_ERROR("PSP load asd failed!\n");
2960 goto failed;
2961 }
2962
2963 ret = psp_rl_load(adev);
2964 if (ret) {
2965 dev_err(adev->dev, "PSP load RL failed!\n");
2966 goto failed;
2967 }
2968
2969 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2970 ret = psp_xgmi_initialize(psp, false, true);
2971 /* Warn on XGMI session initialization failure
2972 * instead of stopping driver initialization
2973 */
2974 if (ret)
2975 dev_err(psp->adev->dev,
2976 "XGMI: Failed to initialize XGMI session\n");
2977 }
2978
2979 if (psp->ta_fw) {
2980 ret = psp_ras_initialize(psp);
2981 if (ret)
2982 dev_err(psp->adev->dev,
2983 "RAS: Failed to initialize RAS\n");
2984
2985 ret = psp_hdcp_initialize(psp);
2986 if (ret)
2987 dev_err(psp->adev->dev,
2988 "HDCP: Failed to initialize HDCP\n");
2989
2990 ret = psp_dtm_initialize(psp);
2991 if (ret)
2992 dev_err(psp->adev->dev,
2993 "DTM: Failed to initialize DTM\n");
2994
2995 ret = psp_rap_initialize(psp);
2996 if (ret)
2997 dev_err(psp->adev->dev,
2998 "RAP: Failed to initialize RAP\n");
2999
3000 ret = psp_securedisplay_initialize(psp);
3001 if (ret)
3002 dev_err(psp->adev->dev,
3003 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3004 }
3005
3006 mutex_unlock(&adev->firmware.mutex);
3007
3008 return 0;
3009
3010failed:
3011 DRM_ERROR("PSP resume failed\n");
3012 mutex_unlock(&adev->firmware.mutex);
3013 return ret;
3014}
3015
3016int psp_gpu_reset(struct amdgpu_device *adev)
3017{
3018 int ret;
3019
3020 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3021 return 0;
3022
3023 mutex_lock(&adev->psp.mutex);
3024 ret = psp_mode1_reset(&adev->psp);
3025 mutex_unlock(&adev->psp.mutex);
3026
3027 return ret;
3028}
3029
3030int psp_rlc_autoload_start(struct psp_context *psp)
3031{
3032 int ret;
3033 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3034
3035 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3036
3037 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3038 psp->fence_buf_mc_addr);
3039
3040 release_psp_cmd_buf(psp);
3041
3042 return ret;
3043}
3044
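/*
 * Write one KM (GPCOM) ring buffer frame pointing at @cmd_buf_mc_addr
 * and @fence_mc_addr, then advance the ring write pointer (in DWORDs).
 */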
3045int psp_ring_cmd_submit(struct psp_context *psp,
3046 uint64_t cmd_buf_mc_addr,
3047 uint64_t fence_mc_addr,
3048 int index)
3049{
3050 unsigned int psp_write_ptr_reg = 0;
3051 struct psp_gfx_rb_frame *write_frame;
3052 struct psp_ring *ring = &psp->km_ring;
3053 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3054 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3055 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3056 struct amdgpu_device *adev = psp->adev;
3057 uint32_t ring_size_dw = ring->ring_size / 4;
3058 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3059
3060 /* KM (GPCOM) prepare write pointer */
3061 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3062
3063 /* Update KM RB frame pointer to new frame */
3064 /* write_frame ptr increments by size of rb_frame in bytes */
3065 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3066 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3067 write_frame = ring_buffer_start;
3068 else
3069 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3070 /* Check invalid write_frame ptr address */
3071 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3072 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3073 ring_buffer_start, ring_buffer_end, write_frame);
3074 DRM_ERROR("write_frame is pointing to address out of bounds\n");
3075 return -EINVAL;
3076 }
3077
3078 /* Initialize KM RB frame */
3079 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3080
3081 /* Update KM RB frame */
3082 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3083 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3084 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3085 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3086 write_frame->fence_value = index;
3087 amdgpu_device_flush_hdp(adev, NULL);
3088
3089 /* Update the write Pointer in DWORDs */
3090 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3091 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3092 return 0;
3093}
3094
3095int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3096{
3097 struct amdgpu_device *adev = psp->adev;
3098 char fw_name[PSP_FW_NAME_LEN];
3099 const struct psp_firmware_header_v1_0 *asd_hdr;
3100 int err = 0;
3101
3102 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3103 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3104 if (err)
3105 goto out;
3106
3107 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3108 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3109 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3110 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3111 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3112 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3113 return 0;
3114out:
3115 amdgpu_ucode_release(&adev->psp.asd_fw);
3116 return err;
3117}
3118
3119int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3120{
3121 struct amdgpu_device *adev = psp->adev;
3122 char fw_name[PSP_FW_NAME_LEN];
3123 const struct psp_firmware_header_v1_0 *toc_hdr;
3124 int err = 0;
3125
3126 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3127 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3128 if (err)
3129 goto out;
3130
3131 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3132 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3133 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3134 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3135 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3136 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3137 return 0;
3138out:
3139 amdgpu_ucode_release(&adev->psp.toc_fw);
3140 return err;
3141}
3142
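/* Record one firmware blob described by a v2 SOS header bin descriptor. */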
3143static int parse_sos_bin_descriptor(struct psp_context *psp,
3144 const struct psp_fw_bin_desc *desc,
3145 const struct psp_firmware_header_v2_0 *sos_hdr)
3146{
3147 uint8_t *ucode_start_addr = NULL;
3148
3149 if (!psp || !desc || !sos_hdr)
3150 return -EINVAL;
3151
3152 ucode_start_addr = (uint8_t *)sos_hdr +
3153 le32_to_cpu(desc->offset_bytes) +
3154 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3155
3156 switch (desc->fw_type) {
3157 case PSP_FW_TYPE_PSP_SOS:
3158 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3159 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3160 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3161 psp->sos.start_addr = ucode_start_addr;
3162 break;
3163 case PSP_FW_TYPE_PSP_SYS_DRV:
3164 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3165 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3166 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3167 psp->sys.start_addr = ucode_start_addr;
3168 break;
3169 case PSP_FW_TYPE_PSP_KDB:
3170 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3171 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3172 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3173 psp->kdb.start_addr = ucode_start_addr;
3174 break;
3175 case PSP_FW_TYPE_PSP_TOC:
3176 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3177 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3178 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3179 psp->toc.start_addr = ucode_start_addr;
3180 break;
3181 case PSP_FW_TYPE_PSP_SPL:
3182 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3183 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3184 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3185 psp->spl.start_addr = ucode_start_addr;
3186 break;
3187 case PSP_FW_TYPE_PSP_RL:
3188 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3189 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3190 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3191 psp->rl.start_addr = ucode_start_addr;
3192 break;
3193 case PSP_FW_TYPE_PSP_SOC_DRV:
3194 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3195 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3196 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3197 psp->soc_drv.start_addr = ucode_start_addr;
3198 break;
3199 case PSP_FW_TYPE_PSP_INTF_DRV:
3200 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3201 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3202 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3203 psp->intf_drv.start_addr = ucode_start_addr;
3204 break;
3205 case PSP_FW_TYPE_PSP_DBG_DRV:
3206 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3207 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3208 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3209 psp->dbg_drv.start_addr = ucode_start_addr;
3210 break;
3211 case PSP_FW_TYPE_PSP_RAS_DRV:
3212 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3213 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3214 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3215 psp->ras_drv.start_addr = ucode_start_addr;
3216 break;
3217 default:
3218 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3219 break;
3220 }
3221
3222 return 0;
3223}
3224
3225static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3226{
3227 const struct psp_firmware_header_v1_0 *sos_hdr;
3228 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3229 uint8_t *ucode_array_start_addr;
3230
3231 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3232 ucode_array_start_addr = (uint8_t *)sos_hdr +
3233 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3234
3235 if (adev->gmc.xgmi.connected_to_cpu ||
3236 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3237 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3238 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3239
3240 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3241 adev->psp.sys.start_addr = ucode_array_start_addr;
3242
3243 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3244 adev->psp.sos.start_addr = ucode_array_start_addr +
3245 le32_to_cpu(sos_hdr->sos.offset_bytes);
3246 } else {
3247 /* Load alternate PSP SOS FW */
3248 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3249
3250 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3251 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3252
3253 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3254 adev->psp.sys.start_addr = ucode_array_start_addr +
3255 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3256
3257 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3258 adev->psp.sos.start_addr = ucode_array_start_addr +
3259 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3260 }
3261
3262 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3263 dev_warn(adev->dev, "PSP SOS FW not available");
3264 return -EINVAL;
3265 }
3266
3267 return 0;
3268}
3269
3270int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3271{
3272 struct amdgpu_device *adev = psp->adev;
3273 char fw_name[PSP_FW_NAME_LEN];
3274 const struct psp_firmware_header_v1_0 *sos_hdr;
3275 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3276 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3277 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3278 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3279 int err = 0;
3280 uint8_t *ucode_array_start_addr;
3281 int fw_index = 0;
3282
3283 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3284 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3285 if (err)
3286 goto out;
3287
3288 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3289 ucode_array_start_addr = (uint8_t *)sos_hdr +
3290 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3291 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3292
3293 switch (sos_hdr->header.header_version_major) {
3294 case 1:
3295 err = psp_init_sos_base_fw(adev);
3296 if (err)
3297 goto out;
3298
3299 if (sos_hdr->header.header_version_minor == 1) {
3300 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3301 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3302 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3303 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3304 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3305 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3306 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3307 }
3308 if (sos_hdr->header.header_version_minor == 2) {
3309 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3310 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3311 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3312 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3313 }
3314 if (sos_hdr->header.header_version_minor == 3) {
3315 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3316 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3317 adev->psp.toc.start_addr = ucode_array_start_addr +
3318 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3319 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3320 adev->psp.kdb.start_addr = ucode_array_start_addr +
3321 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3322 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3323 adev->psp.spl.start_addr = ucode_array_start_addr +
3324 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3325 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3326 adev->psp.rl.start_addr = ucode_array_start_addr +
3327 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3328 }
3329 break;
3330 case 2:
3331 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3332
3333 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3334 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3335 err = -EINVAL;
3336 goto out;
3337 }
3338
3339 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3340 err = parse_sos_bin_descriptor(psp,
3341 &sos_hdr_v2_0->psp_fw_bin[fw_index],
3342 sos_hdr_v2_0);
3343 if (err)
3344 goto out;
3345 }
3346 break;
3347 default:
3348 dev_err(adev->dev,
3349 "unsupported psp sos firmware\n");
3350 err = -EINVAL;
3351 goto out;
3352 }
3353
3354 return 0;
3355out:
3356 amdgpu_ucode_release(&adev->psp.sos_fw);
3357
3358 return err;
3359}
3360
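/*
 * parse_ta_bin_descriptor - record one packed TA image from a v2.0 TA header
 *
 * Resolves the descriptor's offset within the ucode array and fills in the
 * matching TA context (ASD, XGMI, RAS, HDCP, DTM, RAP or SECUREDISPLAY).
 * Unknown firmware types only trigger a warning and are not treated as errors.
 */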
3361static int parse_ta_bin_descriptor(struct psp_context *psp,
3362 const struct psp_fw_bin_desc *desc,
3363 const struct ta_firmware_header_v2_0 *ta_hdr)
3364{
3365 uint8_t *ucode_start_addr = NULL;
3366
3367 if (!psp || !desc || !ta_hdr)
3368 return -EINVAL;
3369
3370 ucode_start_addr = (uint8_t *)ta_hdr +
3371 le32_to_cpu(desc->offset_bytes) +
3372 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3373
3374 switch (desc->fw_type) {
3375 case TA_FW_TYPE_PSP_ASD:
3376 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3377 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3378 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3379 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3380 break;
3381 case TA_FW_TYPE_PSP_XGMI:
3382 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3383 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3384 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3385 break;
3386 case TA_FW_TYPE_PSP_RAS:
3387 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3388 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3389 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3390 break;
3391 case TA_FW_TYPE_PSP_HDCP:
3392 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3393 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3394 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3395 break;
3396 case TA_FW_TYPE_PSP_DTM:
3397 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3398 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3399 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3400 break;
3401 case TA_FW_TYPE_PSP_RAP:
3402 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3403 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3404 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3405 break;
3406 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3407 psp->securedisplay_context.context.bin_desc.fw_version =
3408 le32_to_cpu(desc->fw_version);
3409 psp->securedisplay_context.context.bin_desc.size_bytes =
3410 le32_to_cpu(desc->size_bytes);
3411 psp->securedisplay_context.context.bin_desc.start_addr =
3412 ucode_start_addr;
3413 break;
3414 default:
3415 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3416 break;
3417 }
3418
3419 return 0;
3420}
3421
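/*
 * parse_ta_v1_microcode - extract TA images from a v1.0 TA header
 *
 * v1.0 packages describe each TA (XGMI, RAS, HDCP, DTM, SECUREDISPLAY) with
 * dedicated header fields rather than a descriptor array; the XGMI and HDCP
 * images start at the ucode array itself, and the remaining images are found
 * via offsets relative to them.
 */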
3422static int parse_ta_v1_microcode(struct psp_context *psp)
3423{
3424 const struct ta_firmware_header_v1_0 *ta_hdr;
3425 struct amdgpu_device *adev = psp->adev;
3426
3427 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3428
3429 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3430 return -EINVAL;
3431
3432 adev->psp.xgmi_context.context.bin_desc.fw_version =
3433 le32_to_cpu(ta_hdr->xgmi.fw_version);
3434 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3435 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3436 adev->psp.xgmi_context.context.bin_desc.start_addr =
3437 (uint8_t *)ta_hdr +
3438 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3439
3440 adev->psp.ras_context.context.bin_desc.fw_version =
3441 le32_to_cpu(ta_hdr->ras.fw_version);
3442 adev->psp.ras_context.context.bin_desc.size_bytes =
3443 le32_to_cpu(ta_hdr->ras.size_bytes);
3444 adev->psp.ras_context.context.bin_desc.start_addr =
3445 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3446 le32_to_cpu(ta_hdr->ras.offset_bytes);
3447
3448 adev->psp.hdcp_context.context.bin_desc.fw_version =
3449 le32_to_cpu(ta_hdr->hdcp.fw_version);
3450 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3451 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3452 adev->psp.hdcp_context.context.bin_desc.start_addr =
3453 (uint8_t *)ta_hdr +
3454 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3455
3456 adev->psp.dtm_context.context.bin_desc.fw_version =
3457 le32_to_cpu(ta_hdr->dtm.fw_version);
3458 adev->psp.dtm_context.context.bin_desc.size_bytes =
3459 le32_to_cpu(ta_hdr->dtm.size_bytes);
3460 adev->psp.dtm_context.context.bin_desc.start_addr =
3461 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3462 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3463
3464 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3465 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3466 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3467 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3468 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3469 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3470 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3471
3472 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3473
3474 return 0;
3475}
3476
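/*
 * parse_ta_v2_microcode - extract TA images from a v2.0 TA header
 *
 * Walks the ta_fw_bin descriptor array (rejected if the descriptor count
 * reaches UCODE_MAX_PSP_PACKAGING) and hands each entry to
 * parse_ta_bin_descriptor().
 */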
3477static int parse_ta_v2_microcode(struct psp_context *psp)
3478{
3479 const struct ta_firmware_header_v2_0 *ta_hdr;
3480 struct amdgpu_device *adev = psp->adev;
3481 int err = 0;
3482 int ta_index = 0;
3483
3484 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3485
3486 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3487 return -EINVAL;
3488
3489 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3490 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3491 return -EINVAL;
3492 }
3493
3494 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3495 err = parse_ta_bin_descriptor(psp,
3496 &ta_hdr->ta_fw_bin[ta_index],
3497 ta_hdr);
3498 if (err)
3499 return err;
3500 }
3501
3502 return 0;
3503}
3504
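/*
 * psp_init_ta_microcode - request and parse the TA firmware image
 *
 * Loads amdgpu/<chip>_ta.bin and dispatches to the v1 or v2 parser based on
 * the header major version; the firmware is released again on failure.
 */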
3505int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3506{
3507 const struct common_firmware_header *hdr;
3508 struct amdgpu_device *adev = psp->adev;
3509 char fw_name[PSP_FW_NAME_LEN];
3510 int err;
3511
3512 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3513 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3514 if (err)
3515 return err;
3516
3517 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3518 switch (le16_to_cpu(hdr->header_version_major)) {
3519 case 1:
3520 err = parse_ta_v1_microcode(psp);
3521 break;
3522 case 2:
3523 err = parse_ta_v2_microcode(psp);
3524 break;
3525 default:
3526 dev_err(adev->dev, "unsupported TA header version\n");
3527 err = -EINVAL;
3528 }
3529
3530 if (err)
3531 amdgpu_ucode_release(&adev->psp.ta_fw);
3532
3533 return err;
3534}
3535
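/*
 * psp_init_cap_microcode - request the CAP firmware used under SR-IOV
 *
 * Only meaningful for SR-IOV virtual functions; a missing
 * amdgpu/<chip>_cap.bin is tolerated and simply skipped.
 */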
3536int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3537{
3538 struct amdgpu_device *adev = psp->adev;
3539 char fw_name[PSP_FW_NAME_LEN];
3540 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3541 struct amdgpu_firmware_info *info = NULL;
3542 int err = 0;
3543
3544 if (!amdgpu_sriov_vf(adev)) {
3545 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3546 return -EINVAL;
3547 }
3548
3549 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3550 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
			goto out;
		}
		dev_err(adev->dev, "fail to initialize cap microcode\n");
		/* bail out instead of dereferencing a NULL cap_fw below */
		goto out;
	}
3559
3560 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3561 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3562 info->fw = adev->psp.cap_fw;
3563 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3564 adev->psp.cap_fw->data;
3565 adev->firmware.fw_size += ALIGN(
3566 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3567 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3568 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3569 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3570
3571 return 0;
3572
3573out:
3574 amdgpu_ucode_release(&adev->psp.cap_fw);
3575 return err;
3576}
3577
3578static int psp_set_clockgating_state(void *handle,
3579 enum amd_clockgating_state state)
3580{
3581 return 0;
3582}
3583
3584static int psp_set_powergating_state(void *handle,
3585 enum amd_powergating_state state)
3586{
3587 return 0;
3588}
3589
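/*
 * sysfs show handler for usbc_pd_fw: query the PSP for the active USB-C PD
 * firmware version. Returns -EBUSY until the PSP block is late-initialized.
 */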
3590static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3591 struct device_attribute *attr,
3592 char *buf)
3593{
3594 struct drm_device *ddev = dev_get_drvdata(dev);
3595 struct amdgpu_device *adev = drm_to_adev(ddev);
3596 uint32_t fw_ver;
3597 int ret;
3598
3599 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3600 DRM_INFO("PSP block is not ready yet.");
3601 return -EBUSY;
3602 }
3603
3604 mutex_lock(&adev->psp.mutex);
3605 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3606 mutex_unlock(&adev->psp.mutex);
3607
3608 if (ret) {
3609 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
3610 return ret;
3611 }
3612
3613 return sysfs_emit(buf, "%x\n", fw_ver);
3614}
3615
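/*
 * sysfs store handler for usbc_pd_fw: the written string names a firmware
 * file under the amdgpu/ firmware directory. The image is staged in a
 * 1MB-aligned VRAM/GTT buffer and handed to the PSP for flashing.
 */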
3616static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3617 struct device_attribute *attr,
3618 const char *buf,
3619 size_t count)
3620{
3621 struct drm_device *ddev = dev_get_drvdata(dev);
3622 struct amdgpu_device *adev = drm_to_adev(ddev);
3623 int ret, idx;
3624 char fw_name[100];
3625 const struct firmware *usbc_pd_fw;
3626 struct amdgpu_bo *fw_buf_bo = NULL;
3627 uint64_t fw_pri_mc_addr;
3628 void *fw_pri_cpu_addr;
3629
3630 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3631 DRM_INFO("PSP block is not ready yet.");
3632 return -EBUSY;
3633 }
3634
3635 if (!drm_dev_enter(ddev, &idx))
3636 return -ENODEV;
3637
3638 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3639 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3640 if (ret)
3641 goto fail;
3642
3643 /* LFB address which is aligned to 1MB boundary per PSP request */
3644 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3645 AMDGPU_GEM_DOMAIN_VRAM |
3646 AMDGPU_GEM_DOMAIN_GTT,
3647 &fw_buf_bo, &fw_pri_mc_addr,
3648 &fw_pri_cpu_addr);
3649 if (ret)
3650 goto rel_buf;
3651
3652 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3653
3654 mutex_lock(&adev->psp.mutex);
3655 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3656 mutex_unlock(&adev->psp.mutex);
3657
3658 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3659
3660rel_buf:
3661 release_firmware(usbc_pd_fw);
3662fail:
3663 if (ret) {
3664 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
3665 count = ret;
3666 }
3667
3668 drm_dev_exit(idx);
3669 return count;
3670}
3671
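/*
 * psp_copy_fw - stage a firmware binary in the PSP primary buffer
 *
 * Zeroes fw_pri_buf (PSP_1_MEG) and copies bin_size bytes from start_addr
 * into it, skipping the copy entirely if the device has been unplugged
 * (drm_dev_enter() fails). Callers must keep bin_size within PSP_1_MEG.
 */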
3672void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3673{
3674 int idx;
3675
3676 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3677 return;
3678
3679 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3680 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3681
3682 drm_dev_exit(idx);
3683}
3684
3685/**
3686 * DOC: usbc_pd_fw
3687 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3688 * this file will trigger the update process.
3689 */
3690static DEVICE_ATTR(usbc_pd_fw, 0644,
3691 psp_usbc_pd_fw_sysfs_read,
3692 psp_usbc_pd_fw_sysfs_write);
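/*
 * Example usage from user space (illustrative only; the sysfs path depends on
 * the card index and "usbc_pd.bin" is a hypothetical file name):
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo usbc_pd.bin > /sys/class/drm/card0/device/usbc_pd_fw
 * The file written must already be present in the firmware search path as
 * amdgpu/usbc_pd.bin.
 */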
3693
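/* A firmware region is considered valid once a non-zero size has been parsed. */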
3694int is_psp_fw_valid(struct psp_bin_desc bin)
3695{
3696 return bin.size_bytes;
3697}
3698
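/*
 * sysfs bin_attr write handler for psp_vbflash: accumulate the written IFWI
 * image chunks into a kvmalloc'd staging buffer (capped at
 * AMD_VBIOS_FILE_MAX_SIZE_B) until a subsequent read triggers the flash.
 */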
3699static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3700 struct bin_attribute *bin_attr,
3701 char *buffer, loff_t pos, size_t count)
3702{
3703 struct device *dev = kobj_to_dev(kobj);
3704 struct drm_device *ddev = dev_get_drvdata(dev);
3705 struct amdgpu_device *adev = drm_to_adev(ddev);
3706
3707 adev->psp.vbflash_done = false;
3708
3709 /* Safeguard against memory drain */
3710 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3711 dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
3712 kvfree(adev->psp.vbflash_tmp_buf);
3713 adev->psp.vbflash_tmp_buf = NULL;
3714 adev->psp.vbflash_image_size = 0;
3715 return -ENOMEM;
3716 }
3717
3718 /* TODO Just allocate max for now and optimize to realloc later if needed */
3719 if (!adev->psp.vbflash_tmp_buf) {
3720 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3721 if (!adev->psp.vbflash_tmp_buf)
3722 return -ENOMEM;
3723 }
3724
3725 mutex_lock(&adev->psp.mutex);
3726 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3727 adev->psp.vbflash_image_size += count;
3728 mutex_unlock(&adev->psp.mutex);
3729
3730 dev_dbg(adev->dev, "IFWI staged for update");
3731
3732 return count;
3733}
3734
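/*
 * sysfs bin_attr read handler for psp_vbflash: copy the staged IFWI image
 * into a GPU-accessible buffer, ask the PSP to update the SPIROM, then free
 * the staging buffer regardless of the outcome.
 */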
3735static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3736 struct bin_attribute *bin_attr, char *buffer,
3737 loff_t pos, size_t count)
3738{
3739 struct device *dev = kobj_to_dev(kobj);
3740 struct drm_device *ddev = dev_get_drvdata(dev);
3741 struct amdgpu_device *adev = drm_to_adev(ddev);
3742 struct amdgpu_bo *fw_buf_bo = NULL;
3743 uint64_t fw_pri_mc_addr;
3744 void *fw_pri_cpu_addr;
3745 int ret;
3746
3747 if (adev->psp.vbflash_image_size == 0)
3748 return -EINVAL;
3749
3750 dev_dbg(adev->dev, "PSP IFWI flash process initiated");
3751
3752 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3753 AMDGPU_GPU_PAGE_SIZE,
3754 AMDGPU_GEM_DOMAIN_VRAM,
3755 &fw_buf_bo,
3756 &fw_pri_mc_addr,
3757 &fw_pri_cpu_addr);
3758 if (ret)
3759 goto rel_buf;
3760
3761 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3762
3763 mutex_lock(&adev->psp.mutex);
3764 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3765 mutex_unlock(&adev->psp.mutex);
3766
3767 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3768
3769rel_buf:
3770 kvfree(adev->psp.vbflash_tmp_buf);
3771 adev->psp.vbflash_tmp_buf = NULL;
3772 adev->psp.vbflash_image_size = 0;
3773
3774 if (ret) {
3775 dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
3776 return ret;
3777 }
3778
3779 dev_dbg(adev->dev, "PSP IFWI flash process done");
3780 return 0;
3781}
3782
3783/**
3784 * DOC: psp_vbflash
3785 * Writing to this file will stage an IFWI for update. Reading from this file
3786 * will trigger the update process.
3787 */
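/*
 * Typical flow from user space (illustrative only; paths depend on the card
 * index and "ifwi.bin" is a hypothetical image name):
 *   cat ifwi.bin > /sys/class/drm/card0/device/psp_vbflash    # stage image
 *   cat /sys/class/drm/card0/device/psp_vbflash > /dev/null   # start flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status        # poll result
 */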
3788static struct bin_attribute psp_vbflash_bin_attr = {
3789 .attr = {.name = "psp_vbflash", .mode = 0660},
3790 .size = 0,
3791 .write = amdgpu_psp_vbflash_write,
3792 .read = amdgpu_psp_vbflash_read,
3793};
3794
3795/**
3796 * DOC: psp_vbflash_status
3797 * The status of the flash process.
3798 * 0: IFWI flash not complete.
3799 * 1: IFWI flash complete.
3800 */
3801static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3802 struct device_attribute *attr,
3803 char *buf)
3804{
3805 struct drm_device *ddev = dev_get_drvdata(dev);
3806 struct amdgpu_device *adev = drm_to_adev(ddev);
3807 uint32_t vbflash_status;
3808
3809 vbflash_status = psp_vbflash_status(&adev->psp);
3810 if (!adev->psp.vbflash_done)
3811 vbflash_status = 0;
	else if (!(vbflash_status & 0x80000000))
3813 vbflash_status = 1;
3814
3815 return sysfs_emit(buf, "0x%x\n", vbflash_status);
3816}
3817static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3818
3819static struct bin_attribute *bin_flash_attrs[] = {
3820 &psp_vbflash_bin_attr,
3821 NULL
3822};
3823
3824static struct attribute *flash_attrs[] = {
3825 &dev_attr_psp_vbflash_status.attr,
3826 &dev_attr_usbc_pd_fw.attr,
3827 NULL
3828};
3829
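/*
 * Visibility callbacks: usbc_pd_fw is exposed only when PD firmware update is
 * supported (sup_pd_fw_up); psp_vbflash and psp_vbflash_status only when IFWI
 * update is supported (sup_ifwi_up).
 */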
3830static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3831{
3832 struct device *dev = kobj_to_dev(kobj);
3833 struct drm_device *ddev = dev_get_drvdata(dev);
3834 struct amdgpu_device *adev = drm_to_adev(ddev);
3835
3836 if (attr == &dev_attr_usbc_pd_fw.attr)
3837 return adev->psp.sup_pd_fw_up ? 0660 : 0;
3838
3839 return adev->psp.sup_ifwi_up ? 0440 : 0;
3840}
3841
3842static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3843 struct bin_attribute *attr,
3844 int idx)
3845{
3846 struct device *dev = kobj_to_dev(kobj);
3847 struct drm_device *ddev = dev_get_drvdata(dev);
3848 struct amdgpu_device *adev = drm_to_adev(ddev);
3849
3850 return adev->psp.sup_ifwi_up ? 0660 : 0;
3851}
3852
3853const struct attribute_group amdgpu_flash_attr_group = {
3854 .attrs = flash_attrs,
3855 .bin_attrs = bin_flash_attrs,
3856 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3857 .is_visible = amdgpu_flash_attr_is_visible,
3858};
3859
3860const struct amd_ip_funcs psp_ip_funcs = {
3861 .name = "psp",
3862 .early_init = psp_early_init,
3863 .late_init = NULL,
3864 .sw_init = psp_sw_init,
3865 .sw_fini = psp_sw_fini,
3866 .hw_init = psp_hw_init,
3867 .hw_fini = psp_hw_fini,
3868 .suspend = psp_suspend,
3869 .resume = psp_resume,
3870 .is_idle = NULL,
3871 .check_soft_reset = NULL,
3872 .wait_for_idle = NULL,
3873 .soft_reset = NULL,
3874 .set_clockgating_state = psp_set_clockgating_state,
3875 .set_powergating_state = psp_set_powergating_state,
3876};
3877
3878const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3879 .type = AMD_IP_BLOCK_TYPE_PSP,
3880 .major = 3,
3881 .minor = 1,
3882 .rev = 0,
3883 .funcs = &psp_ip_funcs,
3884};
3885
3886const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3887 .type = AMD_IP_BLOCK_TYPE_PSP,
3888 .major = 10,
3889 .minor = 0,
3890 .rev = 0,
3891 .funcs = &psp_ip_funcs,
3892};
3893
3894const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3895 .type = AMD_IP_BLOCK_TYPE_PSP,
3896 .major = 11,
3897 .minor = 0,
3898 .rev = 0,
3899 .funcs = &psp_ip_funcs,
3900};
3901
3902const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3903 .type = AMD_IP_BLOCK_TYPE_PSP,
3904 .major = 11,
3905 .minor = 0,
3906 .rev = 8,
3907 .funcs = &psp_ip_funcs,
3908};
3909
3910const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3911 .type = AMD_IP_BLOCK_TYPE_PSP,
3912 .major = 12,
3913 .minor = 0,
3914 .rev = 0,
3915 .funcs = &psp_ip_funcs,
3916};
3917
3918const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3919 .type = AMD_IP_BLOCK_TYPE_PSP,
3920 .major = 13,
3921 .minor = 0,
3922 .rev = 0,
3923 .funcs = &psp_ip_funcs,
3924};
3925
3926const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3927 .type = AMD_IP_BLOCK_TYPE_PSP,
3928 .major = 13,
3929 .minor = 0,
3930 .rev = 4,
3931 .funcs = &psp_ip_funcs,
3932};