/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
48
49static int psp_load_smu_fw(struct psp_context *psp);
50static int psp_rap_terminate(struct psp_context *psp);
51static int psp_securedisplay_terminate(struct psp_context *psp);
52
53static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55{
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
	/* allocate a 4k page of Local Frame Buffer memory for the ring */
65 ring->ring_size = 0x1000;
66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 AMDGPU_GEM_DOMAIN_VRAM |
68 AMDGPU_GEM_DOMAIN_GTT,
69 &adev->firmware.rbuf,
70 &ring->ring_mem_mc_addr,
71 (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78}
79
/*
 * Because DF Cstate management is centralized in the PMFW, the firmware
 * loading sequence is updated as below:
 * - Load KDB
 * - Load SYS_DRV
 * - Load tOS
 * - Load PMFW
 * - Setup TMR
 * - Load other non-psp fw
 * - Load ASD
 * - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 * - Arcturus and onwards
 */
95static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96{
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122}
123
124static int psp_init_sriov_microcode(struct psp_context *psp)
125{
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 ret = psp_init_cap_microcode(psp, ucode_prefix);
149 ret &= psp_init_ta_microcode(psp, ucode_prefix);
150 break;
151 case IP_VERSION(13, 0, 10):
152 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
153 ret = psp_init_cap_microcode(psp, ucode_prefix);
154 break;
155 default:
156 return -EINVAL;
157 }
158 return ret;
159}
160
161static int psp_early_init(void *handle)
162{
163 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
164 struct psp_context *psp = &adev->psp;
165
166 psp->autoload_supported = true;
167 psp->boot_time_tmr = true;
168
169 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
170 case IP_VERSION(9, 0, 0):
171 psp_v3_1_set_psp_funcs(psp);
172 psp->autoload_supported = false;
173 psp->boot_time_tmr = false;
174 break;
175 case IP_VERSION(10, 0, 0):
176 case IP_VERSION(10, 0, 1):
177 psp_v10_0_set_psp_funcs(psp);
178 psp->autoload_supported = false;
179 psp->boot_time_tmr = false;
180 break;
181 case IP_VERSION(11, 0, 2):
182 case IP_VERSION(11, 0, 4):
183 psp_v11_0_set_psp_funcs(psp);
184 psp->autoload_supported = false;
185 psp->boot_time_tmr = false;
186 break;
187 case IP_VERSION(11, 0, 0):
188 case IP_VERSION(11, 0, 7):
189 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
190 fallthrough;
191 case IP_VERSION(11, 0, 5):
192 case IP_VERSION(11, 0, 9):
193 case IP_VERSION(11, 0, 11):
194 case IP_VERSION(11, 5, 0):
195 case IP_VERSION(11, 0, 12):
196 case IP_VERSION(11, 0, 13):
197 psp_v11_0_set_psp_funcs(psp);
198 psp->boot_time_tmr = false;
199 break;
200 case IP_VERSION(11, 0, 3):
201 case IP_VERSION(12, 0, 1):
202 psp_v12_0_set_psp_funcs(psp);
203 psp->autoload_supported = false;
204 psp->boot_time_tmr = false;
205 break;
206 case IP_VERSION(13, 0, 2):
207 psp->boot_time_tmr = false;
208 fallthrough;
209 case IP_VERSION(13, 0, 6):
210 psp_v13_0_set_psp_funcs(psp);
211 psp->autoload_supported = false;
212 break;
213 case IP_VERSION(13, 0, 1):
214 case IP_VERSION(13, 0, 3):
215 case IP_VERSION(13, 0, 5):
216 case IP_VERSION(13, 0, 8):
217 case IP_VERSION(13, 0, 11):
218 case IP_VERSION(14, 0, 0):
219 case IP_VERSION(14, 0, 1):
220 psp_v13_0_set_psp_funcs(psp);
221 psp->boot_time_tmr = false;
222 break;
223 case IP_VERSION(11, 0, 8):
224 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
225 psp_v11_0_8_set_psp_funcs(psp);
226 }
227 psp->autoload_supported = false;
228 psp->boot_time_tmr = false;
229 break;
230 case IP_VERSION(13, 0, 0):
231 case IP_VERSION(13, 0, 7):
232 case IP_VERSION(13, 0, 10):
233 psp_v13_0_set_psp_funcs(psp);
234 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
235 psp->boot_time_tmr = false;
236 break;
237 case IP_VERSION(13, 0, 4):
238 psp_v13_0_4_set_psp_funcs(psp);
239 psp->boot_time_tmr = false;
240 break;
241 case IP_VERSION(14, 0, 2):
242 case IP_VERSION(14, 0, 3):
243 psp_v14_0_set_psp_funcs(psp);
244 break;
245 default:
246 return -EINVAL;
247 }
248
249 psp->adev = adev;
250
251 adev->psp_timeout = 20000;
252
253 psp_check_pmfw_centralized_cstate_management(psp);
254
255 if (amdgpu_sriov_vf(adev))
256 return psp_init_sriov_microcode(psp);
257 else
258 return psp_init_microcode(psp);
259}
260
261void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
262{
263 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
264 &mem_ctx->shared_buf);
265 mem_ctx->shared_bo = NULL;
266}
267
268static void psp_free_shared_bufs(struct psp_context *psp)
269{
270 void *tmr_buf;
271 void **pptr;
272
273 /* free TMR memory buffer */
274 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
275 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
276 psp->tmr_bo = NULL;
277
278 /* free xgmi shared memory */
279 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
280
281 /* free ras shared memory */
282 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
283
284 /* free hdcp shared memory */
285 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
286
287 /* free dtm shared memory */
288 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
289
290 /* free rap shared memory */
291 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
292
293 /* free securedisplay shared memory */
294 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
295
296
297}
298
299static void psp_memory_training_fini(struct psp_context *psp)
300{
301 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
302
303 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
304 kfree(ctx->sys_cache);
305 ctx->sys_cache = NULL;
306}
307
308static int psp_memory_training_init(struct psp_context *psp)
309{
310 int ret;
311 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
312
313 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
314 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
315 return 0;
316 }
317
318 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
319 if (ctx->sys_cache == NULL) {
320 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
321 ret = -ENOMEM;
322 goto Err_out;
323 }
324
325 dev_dbg(psp->adev->dev,
326 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
327 ctx->train_data_size,
328 ctx->p2c_train_data_offset,
329 ctx->c2p_train_data_offset);
330 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
331 return 0;
332
333Err_out:
334 psp_memory_training_fini(psp);
335 return ret;
336}
337
/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is
 * invalid, or true if the specific database entry is found and copied
 * to @db_entry
 */
348static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
349 enum psp_runtime_entry_type entry_type,
350 void *db_entry)
351{
352 uint64_t db_header_pos, db_dir_pos;
353 struct psp_runtime_data_header db_header = {0};
354 struct psp_runtime_data_directory db_dir = {0};
355 bool ret = false;
356 int i;
357
358 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
359 return false;
360
361 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
362 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
363
364 /* read runtime db header from vram */
365 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
366 sizeof(struct psp_runtime_data_header), false);
367
368 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
369 /* runtime db doesn't exist, exit */
370 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
371 return false;
372 }
373
374 /* read runtime database entry from vram */
375 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
376 sizeof(struct psp_runtime_data_directory), false);
377
378 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
379 /* invalid db entry count, exit */
380 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
381 return false;
382 }
383
384 /* look up for requested entry type */
385 for (i = 0; i < db_dir.entry_count && !ret; i++) {
386 if (db_dir.entry_list[i].entry_type == entry_type) {
387 switch (entry_type) {
388 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
389 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
390 /* invalid db entry size */
391 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
392 return false;
393 }
394 /* read runtime database entry */
395 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
396 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
397 ret = true;
398 break;
399 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
400 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
401 /* invalid db entry size */
402 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
403 return false;
404 }
405 /* read runtime database entry */
406 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
407 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
408 ret = true;
409 break;
410 default:
411 ret = false;
412 break;
413 }
414 }
415 }
416
417 return ret;
418}
419
420static int psp_sw_init(void *handle)
421{
422 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
423 struct psp_context *psp = &adev->psp;
424 int ret;
425 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
426 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
427 struct psp_runtime_scpm_entry scpm_entry;
428
429 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
430 if (!psp->cmd) {
431 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
432 ret = -ENOMEM;
433 }
434
435 adev->psp.xgmi_context.supports_extended_data =
436 !adev->gmc.xgmi.connected_to_cpu &&
437 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
438
439 memset(&scpm_entry, 0, sizeof(scpm_entry));
440 if ((psp_get_runtime_db_entry(adev,
441 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
442 &scpm_entry)) &&
443 (scpm_entry.scpm_status != SCPM_DISABLE)) {
444 adev->scpm_enabled = true;
445 adev->scpm_status = scpm_entry.scpm_status;
446 } else {
447 adev->scpm_enabled = false;
448 adev->scpm_status = SCPM_DISABLE;
449 }
450
451 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
452
453 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
454 if (psp_get_runtime_db_entry(adev,
455 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
456 &boot_cfg_entry)) {
457 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
458 if ((psp->boot_cfg_bitmask) &
459 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
460 /* If psp runtime database exists, then
461 * only enable two stage memory training
462 * when TWO_STAGE_DRAM_TRAINING bit is set
463 * in runtime database
464 */
465 mem_training_ctx->enable_mem_training = true;
466 }
467
468 } else {
469 /* If psp runtime database doesn't exist or is
470 * invalid, force enable two stage memory training
471 */
472 mem_training_ctx->enable_mem_training = true;
473 }
474
475 if (mem_training_ctx->enable_mem_training) {
476 ret = psp_memory_training_init(psp);
477 if (ret) {
478 dev_err(adev->dev, "Failed to initialize memory training!\n");
479 return ret;
480 }
481
482 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
483 if (ret) {
484 dev_err(adev->dev, "Failed to process memory training!\n");
485 return ret;
486 }
487 }
488
489 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
490 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
491 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
492 &psp->fw_pri_bo,
493 &psp->fw_pri_mc_addr,
494 &psp->fw_pri_buf);
495 if (ret)
496 return ret;
497
498 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
499 AMDGPU_GEM_DOMAIN_VRAM |
500 AMDGPU_GEM_DOMAIN_GTT,
501 &psp->fence_buf_bo,
502 &psp->fence_buf_mc_addr,
503 &psp->fence_buf);
504 if (ret)
505 goto failed1;
506
507 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
508 AMDGPU_GEM_DOMAIN_VRAM |
509 AMDGPU_GEM_DOMAIN_GTT,
510 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
511 (void **)&psp->cmd_buf_mem);
512 if (ret)
513 goto failed2;
514
515 return 0;
516
517failed2:
518 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
519 &psp->fence_buf_mc_addr, &psp->fence_buf);
520failed1:
521 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
522 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
523 return ret;
524}
525
526static int psp_sw_fini(void *handle)
527{
528 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
529 struct psp_context *psp = &adev->psp;
530 struct psp_gfx_cmd_resp *cmd = psp->cmd;
531
532 psp_memory_training_fini(psp);
533
534 amdgpu_ucode_release(&psp->sos_fw);
535 amdgpu_ucode_release(&psp->asd_fw);
536 amdgpu_ucode_release(&psp->ta_fw);
537 amdgpu_ucode_release(&psp->cap_fw);
538 amdgpu_ucode_release(&psp->toc_fw);
539
540 kfree(cmd);
541 cmd = NULL;
542
543 psp_free_shared_bufs(psp);
544
545 if (psp->km_ring.ring_mem)
546 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
547 &psp->km_ring.ring_mem_mc_addr,
548 (void **)&psp->km_ring.ring_mem);
549
550 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
551 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
552 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
553 &psp->fence_buf_mc_addr, &psp->fence_buf);
554 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
555 (void **)&psp->cmd_buf_mem);
556
557 return 0;
558}
559
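/*
 * Poll the given PSP register until (value & mask) == reg_val, or, when
 * check_changed is set, until the value differs from reg_val. Gives up
 * after adev->usec_timeout iterations and returns -ETIME on timeout.
 */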
560int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
561 uint32_t reg_val, uint32_t mask, bool check_changed)
562{
563 uint32_t val;
564 int i;
565 struct amdgpu_device *adev = psp->adev;
566
567 if (psp->adev->no_hw_access)
568 return 0;
569
570 for (i = 0; i < adev->usec_timeout; i++) {
571 val = RREG32(reg_index);
572 if (check_changed) {
573 if (val != reg_val)
574 return 0;
575 } else {
576 if ((val & mask) == reg_val)
577 return 0;
578 }
579 udelay(1);
580 }
581
582 return -ETIME;
583}
584
585int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
586 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
587{
588 uint32_t val;
589 int i;
590 struct amdgpu_device *adev = psp->adev;
591
592 if (psp->adev->no_hw_access)
593 return 0;
594
595 for (i = 0; i < msec_timeout; i++) {
596 val = RREG32(reg_index);
597 if ((val & mask) == reg_val)
598 return 0;
599 msleep(1);
600 }
601
602 return -ETIME;
603}
604
605static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
606{
607 switch (cmd_id) {
608 case GFX_CMD_ID_LOAD_TA:
609 return "LOAD_TA";
610 case GFX_CMD_ID_UNLOAD_TA:
611 return "UNLOAD_TA";
612 case GFX_CMD_ID_INVOKE_CMD:
613 return "INVOKE_CMD";
614 case GFX_CMD_ID_LOAD_ASD:
615 return "LOAD_ASD";
616 case GFX_CMD_ID_SETUP_TMR:
617 return "SETUP_TMR";
618 case GFX_CMD_ID_LOAD_IP_FW:
619 return "LOAD_IP_FW";
620 case GFX_CMD_ID_DESTROY_TMR:
621 return "DESTROY_TMR";
622 case GFX_CMD_ID_SAVE_RESTORE:
623 return "SAVE_RESTORE_IP_FW";
624 case GFX_CMD_ID_SETUP_VMR:
625 return "SETUP_VMR";
626 case GFX_CMD_ID_DESTROY_VMR:
627 return "DESTROY_VMR";
628 case GFX_CMD_ID_PROG_REG:
629 return "PROG_REG";
630 case GFX_CMD_ID_GET_FW_ATTESTATION:
631 return "GET_FW_ATTESTATION";
632 case GFX_CMD_ID_LOAD_TOC:
633 return "ID_LOAD_TOC";
634 case GFX_CMD_ID_AUTOLOAD_RLC:
635 return "AUTOLOAD_RLC";
636 case GFX_CMD_ID_BOOT_CFG:
637 return "BOOT_CFG";
638 default:
639 return "UNKNOWN CMD";
640 }
641}
642
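/*
 * Copy the prepared GFX command into the ring-visible command buffer,
 * submit it to the PSP ring and poll the fence buffer until the PSP
 * writes back the expected fence value (or a RAS interrupt/timeout
 * aborts the wait). The response is copied back into @cmd->resp.
 */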
643static int
644psp_cmd_submit_buf(struct psp_context *psp,
645 struct amdgpu_firmware_info *ucode,
646 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
647{
648 int ret;
649 int index;
650 int timeout = psp->adev->psp_timeout;
651 bool ras_intr = false;
652 bool skip_unsupport = false;
653
654 if (psp->adev->no_hw_access)
655 return 0;
656
657 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
658
659 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
660
661 index = atomic_inc_return(&psp->fence_value);
662 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
663 if (ret) {
664 atomic_dec(&psp->fence_value);
665 goto exit;
666 }
667
668 amdgpu_device_invalidate_hdp(psp->adev, NULL);
669 while (*((unsigned int *)psp->fence_buf) != index) {
670 if (--timeout == 0)
671 break;
		/*
		 * Shouldn't wait for the timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and the lock
		 * resource should be released for the psp resume sequence.
		 */
677 ras_intr = amdgpu_ras_intr_triggered();
678 if (ras_intr)
679 break;
680 usleep_range(10, 100);
681 amdgpu_device_invalidate_hdp(psp->adev, NULL);
682 }
683
684 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
685 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
686 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
687
688 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
689
	/* In some cases, the psp response status is not 0 even when there is
	 * no problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we only print a warning instead of an error during psp
	 * initialization, to avoid breaking hw_init, and we don't return
	 * -EINVAL.
	 */
697 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
698 if (ucode)
699 dev_warn(psp->adev->dev,
700 "failed to load ucode %s(0x%X) ",
701 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
702 dev_warn(psp->adev->dev,
703 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
704 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
705 psp->cmd_buf_mem->resp.status);
706 /* If any firmware (including CAP) load fails under SRIOV, it should
707 * return failure to stop the VF from initializing.
708 * Also return failure in case of timeout
709 */
710 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
711 ret = -EINVAL;
712 goto exit;
713 }
714 }
715
716 if (ucode) {
717 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
718 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
719 }
720
721exit:
722 return ret;
723}
724
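/*
 * Take psp->mutex and hand out the zero-initialized shared command
 * buffer; every caller must pair this with release_psp_cmd_buf() to
 * drop the mutex again.
 */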
725static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
726{
727 struct psp_gfx_cmd_resp *cmd = psp->cmd;
728
729 mutex_lock(&psp->mutex);
730
731 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
732
733 return cmd;
734}
735
736static void release_psp_cmd_buf(struct psp_context *psp)
737{
738 mutex_unlock(&psp->mutex);
739}
740
741static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
742 struct psp_gfx_cmd_resp *cmd,
743 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
744{
745 struct amdgpu_device *adev = psp->adev;
746 uint32_t size = 0;
747 uint64_t tmr_pa = 0;
748
749 if (tmr_bo) {
750 size = amdgpu_bo_size(tmr_bo);
751 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
752 }
753
754 if (amdgpu_sriov_vf(psp->adev))
755 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
756 else
757 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
758 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
759 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
760 cmd->cmd.cmd_setup_tmr.buf_size = size;
761 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
762 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
763 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
764}
765
766static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
767 uint64_t pri_buf_mc, uint32_t size)
768{
769 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
770 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
771 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
772 cmd->cmd.cmd_load_toc.toc_size = size;
773}
774
/* Issue the LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
776static int psp_load_toc(struct psp_context *psp,
777 uint32_t *tmr_size)
778{
779 int ret;
780 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
781
782 /* Copy toc to psp firmware private buffer */
783 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
784
785 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
786
787 ret = psp_cmd_submit_buf(psp, NULL, cmd,
788 psp->fence_buf_mc_addr);
789 if (!ret)
790 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
791
792 release_psp_cmd_buf(psp);
793
794 return ret;
795}
796
797/* Set up Trusted Memory Region */
798static int psp_tmr_init(struct psp_context *psp)
799{
800 int ret = 0;
801 int tmr_size;
802 void *tmr_buf;
803 void **pptr;
804
	/*
	 * According to the HW engineers, the TMR address should be "naturally
	 * aligned", i.e. the start address should be an integer multiple of
	 * the TMR size.
	 *
	 * Note: this memory needs to stay reserved until the driver is
	 * unloaded.
	 */
812 tmr_size = PSP_TMR_SIZE(psp->adev);
813
	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of the TMR needed
	 */
817 if (!amdgpu_sriov_vf(psp->adev) &&
818 psp->toc.start_addr &&
819 psp->toc.size_bytes &&
820 psp->fw_pri_buf) {
821 ret = psp_load_toc(psp, &tmr_size);
822 if (ret) {
823 dev_err(psp->adev->dev, "Failed to load toc\n");
824 return ret;
825 }
826 }
827
828 if (!psp->tmr_bo && !psp->boot_time_tmr) {
829 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
830 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
831 PSP_TMR_ALIGNMENT,
832 AMDGPU_HAS_VRAM(psp->adev) ?
833 AMDGPU_GEM_DOMAIN_VRAM :
834 AMDGPU_GEM_DOMAIN_GTT,
835 &psp->tmr_bo, &psp->tmr_mc_addr,
836 pptr);
837 }
838
839 return ret;
840}
841
842static bool psp_skip_tmr(struct psp_context *psp)
843{
844 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
845 case IP_VERSION(11, 0, 9):
846 case IP_VERSION(11, 0, 7):
847 case IP_VERSION(13, 0, 2):
848 case IP_VERSION(13, 0, 6):
849 case IP_VERSION(13, 0, 10):
850 return true;
851 default:
852 return false;
853 }
854}
855
856static int psp_tmr_load(struct psp_context *psp)
857{
858 int ret;
859 struct psp_gfx_cmd_resp *cmd;
860
	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up the TMR;
	 * it is already set up by the host driver.
	 */
864 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
865 return 0;
866
867 cmd = acquire_psp_cmd_buf(psp);
868
869 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
870 if (psp->tmr_bo)
871 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
872 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
873
874 ret = psp_cmd_submit_buf(psp, NULL, cmd,
875 psp->fence_buf_mc_addr);
876
877 release_psp_cmd_buf(psp);
878
879 return ret;
880}
881
882static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
883 struct psp_gfx_cmd_resp *cmd)
884{
885 if (amdgpu_sriov_vf(psp->adev))
886 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
887 else
888 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
889}
890
891static int psp_tmr_unload(struct psp_context *psp)
892{
893 int ret;
894 struct psp_gfx_cmd_resp *cmd;
895
896 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
897 * as TMR is not loaded at all
898 */
899 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
900 return 0;
901
902 cmd = acquire_psp_cmd_buf(psp);
903
904 psp_prep_tmr_unload_cmd_buf(psp, cmd);
905 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
906
907 ret = psp_cmd_submit_buf(psp, NULL, cmd,
908 psp->fence_buf_mc_addr);
909
910 release_psp_cmd_buf(psp);
911
912 return ret;
913}
914
915static int psp_tmr_terminate(struct psp_context *psp)
916{
917 return psp_tmr_unload(psp);
918}
919
920int psp_get_fw_attestation_records_addr(struct psp_context *psp,
921 uint64_t *output_ptr)
922{
923 int ret;
924 struct psp_gfx_cmd_resp *cmd;
925
926 if (!output_ptr)
927 return -EINVAL;
928
929 if (amdgpu_sriov_vf(psp->adev))
930 return 0;
931
932 cmd = acquire_psp_cmd_buf(psp);
933
934 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
935
936 ret = psp_cmd_submit_buf(psp, NULL, cmd,
937 psp->fence_buf_mc_addr);
938
939 if (!ret) {
940 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
941 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
942 }
943
944 release_psp_cmd_buf(psp);
945
946 return ret;
947}
948
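/* Read the PSP boot config and report whether the GECC feature bit is set */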
949static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
950{
951 struct psp_context *psp = &adev->psp;
952 struct psp_gfx_cmd_resp *cmd;
953 int ret;
954
955 if (amdgpu_sriov_vf(adev))
956 return 0;
957
958 cmd = acquire_psp_cmd_buf(psp);
959
960 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
961 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
962
963 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
964 if (!ret) {
965 *boot_cfg =
966 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
967 }
968
969 release_psp_cmd_buf(psp);
970
971 return ret;
972}
973
974static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
975{
976 int ret;
977 struct psp_context *psp = &adev->psp;
978 struct psp_gfx_cmd_resp *cmd;
979
980 if (amdgpu_sriov_vf(adev))
981 return 0;
982
983 cmd = acquire_psp_cmd_buf(psp);
984
985 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
986 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
987 cmd->cmd.boot_cfg.boot_config = boot_cfg;
988 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
989
990 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
991
992 release_psp_cmd_buf(psp);
993
994 return ret;
995}
996
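/* Load the register list firmware, if present, through the LOAD_IP_FW command */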
997static int psp_rl_load(struct amdgpu_device *adev)
998{
999 int ret;
1000 struct psp_context *psp = &adev->psp;
1001 struct psp_gfx_cmd_resp *cmd;
1002
1003 if (!is_psp_fw_valid(psp->rl))
1004 return 0;
1005
1006 cmd = acquire_psp_cmd_buf(psp);
1007
1008 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1009 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1010
1011 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1012 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1013 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1014 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1015 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1016
1017 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1018
1019 release_psp_cmd_buf(psp);
1020
1021 return ret;
1022}
1023
1024int psp_spatial_partition(struct psp_context *psp, int mode)
1025{
1026 struct psp_gfx_cmd_resp *cmd;
1027 int ret;
1028
1029 if (amdgpu_sriov_vf(psp->adev))
1030 return 0;
1031
1032 cmd = acquire_psp_cmd_buf(psp);
1033
1034 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1035 cmd->cmd.cmd_spatial_part.mode = mode;
1036
1037 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1038 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1039
1040 release_psp_cmd_buf(psp);
1041
1042 return ret;
1043}
1044
1045static int psp_asd_initialize(struct psp_context *psp)
1046{
1047 int ret;
1048
	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for sriov for now.
	 * TODO: add a version check to make it common
	 */
1053 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1054 return 0;
1055
1056 psp->asd_context.mem_context.shared_mc_addr = 0;
1057 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1058 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1059
1060 ret = psp_ta_load(psp, &psp->asd_context);
1061 if (!ret)
1062 psp->asd_context.initialized = true;
1063
1064 return ret;
1065}
1066
1067static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1068 uint32_t session_id)
1069{
1070 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1071 cmd->cmd.cmd_unload_ta.session_id = session_id;
1072}
1073
1074int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1075{
1076 int ret;
1077 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1078
1079 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1080
1081 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1082
1083 context->resp_status = cmd->resp.status;
1084
1085 release_psp_cmd_buf(psp);
1086
1087 return ret;
1088}
1089
1090static int psp_asd_terminate(struct psp_context *psp)
1091{
1092 int ret;
1093
1094 if (amdgpu_sriov_vf(psp->adev))
1095 return 0;
1096
1097 if (!psp->asd_context.initialized)
1098 return 0;
1099
1100 ret = psp_ta_unload(psp, &psp->asd_context);
1101 if (!ret)
1102 psp->asd_context.initialized = false;
1103
1104 return ret;
1105}
1106
1107static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1108 uint32_t id, uint32_t value)
1109{
1110 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1111 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1112 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1113}
1114
1115int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1116 uint32_t value)
1117{
1118 struct psp_gfx_cmd_resp *cmd;
1119 int ret = 0;
1120
1121 if (reg >= PSP_REG_LAST)
1122 return -EINVAL;
1123
1124 cmd = acquire_psp_cmd_buf(psp);
1125
1126 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1127 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1128 if (ret)
1129 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1130
1131 release_psp_cmd_buf(psp);
1132
1133 return ret;
1134}
1135
1136static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1137 uint64_t ta_bin_mc,
1138 struct ta_context *context)
1139{
1140 cmd->cmd_id = context->ta_load_type;
1141 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1142 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1143 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1144
1145 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1146 lower_32_bits(context->mem_context.shared_mc_addr);
1147 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1148 upper_32_bits(context->mem_context.shared_mc_addr);
1149 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1150}
1151
1152int psp_ta_init_shared_buf(struct psp_context *psp,
1153 struct ta_mem_context *mem_ctx)
1154{
	/*
	 * Allocate 16k memory aligned to 4k from the Frame Buffer (local
	 * physical) for the TA <-> host shared memory
	 */
1159 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1160 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1161 AMDGPU_GEM_DOMAIN_GTT,
1162 &mem_ctx->shared_bo,
1163 &mem_ctx->shared_mc_addr,
1164 &mem_ctx->shared_buf);
1165}
1166
1167static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1168 uint32_t ta_cmd_id,
1169 uint32_t session_id)
1170{
1171 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1172 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1173 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1174}
1175
1176int psp_ta_invoke(struct psp_context *psp,
1177 uint32_t ta_cmd_id,
1178 struct ta_context *context)
1179{
1180 int ret;
1181 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1182
1183 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1184
1185 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1186 psp->fence_buf_mc_addr);
1187
1188 context->resp_status = cmd->resp.status;
1189
1190 release_psp_cmd_buf(psp);
1191
1192 return ret;
1193}
1194
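/*
 * Copy the TA binary into the PSP private firmware buffer, issue the
 * load command and, on success, record the session id returned by the
 * PSP for later invoke/unload calls.
 */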
1195int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1196{
1197 int ret;
1198 struct psp_gfx_cmd_resp *cmd;
1199
1200 cmd = acquire_psp_cmd_buf(psp);
1201
1202 psp_copy_fw(psp, context->bin_desc.start_addr,
1203 context->bin_desc.size_bytes);
1204
1205 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1206
1207 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1208 psp->fence_buf_mc_addr);
1209
1210 context->resp_status = cmd->resp.status;
1211
1212 if (!ret)
1213 context->session_id = cmd->resp.session_id;
1214
1215 release_psp_cmd_buf(psp);
1216
1217 return ret;
1218}
1219
1220int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1221{
1222 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1223}
1224
1225int psp_xgmi_terminate(struct psp_context *psp)
1226{
1227 int ret;
1228 struct amdgpu_device *adev = psp->adev;
1229
1230 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1231 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1232 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1233 adev->gmc.xgmi.connected_to_cpu))
1234 return 0;
1235
1236 if (!psp->xgmi_context.context.initialized)
1237 return 0;
1238
1239 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1240
1241 psp->xgmi_context.context.initialized = false;
1242
1243 return ret;
1244}
1245
1246int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1247{
1248 struct ta_xgmi_shared_memory *xgmi_cmd;
1249 int ret;
1250
1251 if (!psp->ta_fw ||
1252 !psp->xgmi_context.context.bin_desc.size_bytes ||
1253 !psp->xgmi_context.context.bin_desc.start_addr)
1254 return -ENOENT;
1255
1256 if (!load_ta)
1257 goto invoke;
1258
1259 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1260 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1261
1262 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1263 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1264 if (ret)
1265 return ret;
1266 }
1267
1268 /* Load XGMI TA */
1269 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1270 if (!ret)
1271 psp->xgmi_context.context.initialized = true;
1272 else
1273 return ret;
1274
1275invoke:
1276 /* Initialize XGMI session */
1277 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1278 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1279 xgmi_cmd->flag_extend_link_record = set_extended_data;
1280 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1281
1282 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
1284 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1285
1286 return ret;
1287}
1288
1289int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1290{
1291 struct ta_xgmi_shared_memory *xgmi_cmd;
1292 int ret;
1293
1294 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1295 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1296
1297 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1298
1299 /* Invoke xgmi ta to get hive id */
1300 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1301 if (ret)
1302 return ret;
1303
1304 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1305
1306 return 0;
1307}
1308
1309int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1310{
1311 struct ta_xgmi_shared_memory *xgmi_cmd;
1312 int ret;
1313
1314 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1315 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1316
1317 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1318
1319 /* Invoke xgmi ta to get the node id */
1320 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1321 if (ret)
1322 return ret;
1323
1324 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1325
1326 return 0;
1327}
1328
1329static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1330{
1331 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1332 IP_VERSION(13, 0, 2) &&
1333 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1334 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1335 IP_VERSION(13, 0, 6);
1336}
1337
1338/*
1339 * Chips that support extended topology information require the driver to
1340 * reflect topology information in the opposite direction. This is
1341 * because the TA has already exceeded its link record limit and if the
1342 * TA holds bi-directional information, the driver would have to do
1343 * multiple fetches instead of just two.
1344 */
1345static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1346 struct psp_xgmi_node_info node_info)
1347{
1348 struct amdgpu_device *mirror_adev;
1349 struct amdgpu_hive_info *hive;
1350 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1351 uint64_t dst_node_id = node_info.node_id;
1352 uint8_t dst_num_hops = node_info.num_hops;
1353 uint8_t dst_num_links = node_info.num_links;
1354
1355 hive = amdgpu_get_xgmi_hive(psp->adev);
1356 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1357 struct psp_xgmi_topology_info *mirror_top_info;
1358 int j;
1359
1360 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1361 continue;
1362
1363 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1364 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1365 if (mirror_top_info->nodes[j].node_id != src_node_id)
1366 continue;
1367
1368 mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * Prevent re-reflection of a 0 num_links value since the
			 * reflection criterion is based on num_hops (direct or
			 * indirect).
			 */
1374 if (dst_num_links)
1375 mirror_top_info->nodes[j].num_links = dst_num_links;
1376
1377 break;
1378 }
1379
1380 break;
1381 }
1382
1383 amdgpu_put_xgmi_hive(hive);
1384}
1385
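/*
 * Query the XGMI TA for topology information: first fetch the per-node
 * hop/sharing data, then (when supported) issue a second request for
 * peer link information and reflect it to the mirrored nodes as needed.
 */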
1386int psp_xgmi_get_topology_info(struct psp_context *psp,
1387 int number_devices,
1388 struct psp_xgmi_topology_info *topology,
1389 bool get_extended_data)
1390{
1391 struct ta_xgmi_shared_memory *xgmi_cmd;
1392 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1393 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1394 int i;
1395 int ret;
1396
1397 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1398 return -EINVAL;
1399
1400 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1401 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1402 xgmi_cmd->flag_extend_link_record = get_extended_data;
1403
1404 /* Fill in the shared memory with topology information as input */
1405 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1406 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1407 topology_info_input->num_nodes = number_devices;
1408
1409 for (i = 0; i < topology_info_input->num_nodes; i++) {
1410 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1411 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1412 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1413 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1414 }
1415
1416 /* Invoke xgmi ta to get the topology information */
1417 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1418 if (ret)
1419 return ret;
1420
1421 /* Read the output topology information from the shared memory */
1422 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1423 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1424 for (i = 0; i < topology->num_nodes; i++) {
1425 /* extended data will either be 0 or equal to non-extended data */
1426 if (topology_info_output->nodes[i].num_hops)
1427 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1428
1429 /* non-extended data gets everything here so no need to update */
1430 if (!get_extended_data) {
1431 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1432 topology->nodes[i].is_sharing_enabled =
1433 topology_info_output->nodes[i].is_sharing_enabled;
1434 topology->nodes[i].sdma_engine =
1435 topology_info_output->nodes[i].sdma_engine;
1436 }
1437
1438 }
1439
1440 /* Invoke xgmi ta again to get the link information */
1441 if (psp_xgmi_peer_link_info_supported(psp)) {
1442 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1443 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1444 bool requires_reflection =
1445 (psp->xgmi_context.supports_extended_data &&
1446 get_extended_data) ||
1447 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1448 IP_VERSION(13, 0, 6);
1449 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1450 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1451
		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per the xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
1457 if (ta_port_num_support) {
1458 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1459
1460 for (i = 0; i < topology->num_nodes; i++)
1461 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1462
1463 link_extend_info_output->num_nodes = topology->num_nodes;
1464 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1465 } else {
1466 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1467
1468 for (i = 0; i < topology->num_nodes; i++)
1469 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1470
1471 link_info_output->num_nodes = topology->num_nodes;
1472 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1473 }
1474
1475 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1476 if (ret)
1477 return ret;
1478
1479 for (i = 0; i < topology->num_nodes; i++) {
1480 uint8_t node_num_links = ta_port_num_support ?
1481 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1482 /* accumulate num_links on extended data */
1483 if (get_extended_data) {
1484 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1485 } else {
1486 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1487 topology->nodes[i].num_links : node_num_links;
1488 }
			/* populate the connected port num info if supported and available */
1490 if (ta_port_num_support && topology->nodes[i].num_links) {
1491 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1492 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1493 }
1494
1495 /* reflect the topology information for bi-directionality */
1496 if (requires_reflection && topology->nodes[i].num_hops)
1497 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1498 }
1499 }
1500
1501 return 0;
1502}
1503
1504int psp_xgmi_set_topology_info(struct psp_context *psp,
1505 int number_devices,
1506 struct psp_xgmi_topology_info *topology)
1507{
1508 struct ta_xgmi_shared_memory *xgmi_cmd;
1509 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1510 int i;
1511
1512 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1513 return -EINVAL;
1514
1515 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1516 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1517
1518 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1519 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1520 topology_info_input->num_nodes = number_devices;
1521
1522 for (i = 0; i < topology_info_input->num_nodes; i++) {
1523 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1524 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1525 topology_info_input->nodes[i].is_sharing_enabled = 1;
1526 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1527 }
1528
1529 /* Invoke xgmi ta to set topology information */
1530 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1531}
1532
1533// ras begin
1534static void psp_ras_ta_check_status(struct psp_context *psp)
1535{
1536 struct ta_ras_shared_memory *ras_cmd =
1537 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1538
1539 switch (ras_cmd->ras_status) {
1540 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1541 dev_warn(psp->adev->dev,
1542 "RAS WARNING: cmd failed due to unsupported ip\n");
1543 break;
1544 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1545 dev_warn(psp->adev->dev,
1546 "RAS WARNING: cmd failed due to unsupported error injection\n");
1547 break;
1548 case TA_RAS_STATUS__SUCCESS:
1549 break;
1550 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1551 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1552 dev_warn(psp->adev->dev,
1553 "RAS WARNING: Inject error to critical region is not allowed\n");
1554 break;
1555 default:
1556 dev_warn(psp->adev->dev,
1557 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1558 break;
1559 }
1560}
1561
1562int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1563{
1564 struct ta_ras_shared_memory *ras_cmd;
1565 int ret;
1566
1567 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1568
1569 /*
1570 * TODO: bypass the loading in sriov for now
1571 */
1572 if (amdgpu_sriov_vf(psp->adev))
1573 return 0;
1574
1575 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1576
1577 if (amdgpu_ras_intr_triggered())
1578 return ret;
1579
1580 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1581 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1582 return -EINVAL;
1583 }
1584
1585 if (!ret) {
1586 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1587 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1588
1589 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1590 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1591 dev_warn(psp->adev->dev,
1592 "RAS internal register access blocked\n");
1593
1594 psp_ras_ta_check_status(psp);
1595 }
1596
1597 return ret;
1598}
1599
1600int psp_ras_enable_features(struct psp_context *psp,
1601 union ta_ras_cmd_input *info, bool enable)
1602{
1603 struct ta_ras_shared_memory *ras_cmd;
1604 int ret;
1605
1606 if (!psp->ras_context.context.initialized)
1607 return -EINVAL;
1608
1609 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1610 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1611
1612 if (enable)
1613 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1614 else
1615 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1616
1617 ras_cmd->ras_in_message = *info;
1618
1619 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1620 if (ret)
1621 return -EINVAL;
1622
1623 return 0;
1624}
1625
1626int psp_ras_terminate(struct psp_context *psp)
1627{
1628 int ret;
1629
1630 /*
1631 * TODO: bypass the terminate in sriov for now
1632 */
1633 if (amdgpu_sriov_vf(psp->adev))
1634 return 0;
1635
1636 if (!psp->ras_context.context.initialized)
1637 return 0;
1638
1639 ret = psp_ta_unload(psp, &psp->ras_context.context);
1640
1641 psp->ras_context.context.initialized = false;
1642
1643 return ret;
1644}
1645
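/*
 * Initialize the RAS TA: reconcile the GECC setting in the PSP boot
 * config with the requested RAS support, allocate the shared buffer if
 * needed and load the RAS TA with the proper init flags.
 */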
1646int psp_ras_initialize(struct psp_context *psp)
1647{
1648 int ret;
1649 uint32_t boot_cfg = 0xFF;
1650 struct amdgpu_device *adev = psp->adev;
1651 struct ta_ras_shared_memory *ras_cmd;
1652
1653 /*
1654 * TODO: bypass the initialize in sriov for now
1655 */
1656 if (amdgpu_sriov_vf(adev))
1657 return 0;
1658
1659 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1660 !adev->psp.ras_context.context.bin_desc.start_addr) {
1661 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1662 return 0;
1663 }
1664
1665 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1666 /* query GECC enablement status from boot config
1667 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1668 */
1669 ret = psp_boot_config_get(adev, &boot_cfg);
1670 if (ret)
1671 dev_warn(adev->dev, "PSP get boot config failed\n");
1672
1673 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1674 if (!boot_cfg) {
1675 dev_info(adev->dev, "GECC is disabled\n");
1676 } else {
				/* disable GECC in the next boot cycle if ras is
				 * disabled by the module parameters amdgpu_ras_enable
				 * and/or amdgpu_ras_mask, or if the boot_config_get
				 * call failed
				 */
1682 ret = psp_boot_config_set(adev, 0);
1683 if (ret)
1684 dev_warn(adev->dev, "PSP set boot config failed\n");
1685 else
1686 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1687 }
1688 } else {
1689 if (boot_cfg == 1) {
1690 dev_info(adev->dev, "GECC is enabled\n");
1691 } else {
				/* enable GECC in the next boot cycle if it is disabled
				 * in the boot config, or force-enable GECC if we failed
				 * to get the boot configuration
				 */
1696 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1697 if (ret)
1698 dev_warn(adev->dev, "PSP set boot config failed\n");
1699 else
1700 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1701 }
1702 }
1703 }
1704
1705 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1706 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1707
1708 if (!psp->ras_context.context.mem_context.shared_buf) {
1709 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1710 if (ret)
1711 return ret;
1712 }
1713
1714 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1715 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1716
1717 if (amdgpu_ras_is_poison_mode_supported(adev))
1718 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1719 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1720 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1721 ras_cmd->ras_in_message.init_flags.xcc_mask =
1722 adev->gfx.xcc_mask;
1723 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1724
1725 ret = psp_ta_load(psp, &psp->ras_context.context);
1726
1727 if (!ret && !ras_cmd->ras_status)
1728 psp->ras_context.context.initialized = true;
1729 else {
1730 if (ras_cmd->ras_status)
1731 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1732
1733 /* fail to load RAS TA */
1734 psp->ras_context.context.initialized = false;
1735 }
1736
1737 return ret;
1738}
1739
1740int psp_ras_trigger_error(struct psp_context *psp,
1741 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1742{
1743 struct ta_ras_shared_memory *ras_cmd;
1744 struct amdgpu_device *adev = psp->adev;
1745 int ret;
1746 uint32_t dev_mask;
1747
1748 if (!psp->ras_context.context.initialized)
1749 return -EINVAL;
1750
1751 switch (info->block_id) {
1752 case TA_RAS_BLOCK__GFX:
1753 dev_mask = GET_MASK(GC, instance_mask);
1754 break;
1755 case TA_RAS_BLOCK__SDMA:
1756 dev_mask = GET_MASK(SDMA0, instance_mask);
1757 break;
1758 case TA_RAS_BLOCK__VCN:
1759 case TA_RAS_BLOCK__JPEG:
1760 dev_mask = GET_MASK(VCN, instance_mask);
1761 break;
1762 default:
1763 dev_mask = instance_mask;
1764 break;
1765 }
1766
1767 /* reuse sub_block_index for backward compatibility */
1768 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1769 dev_mask &= AMDGPU_RAS_INST_MASK;
1770 info->sub_block_index |= dev_mask;
1771
1772 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1773 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1774
1775 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1776 ras_cmd->ras_in_message.trigger_error = *info;
1777
1778 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1779 if (ret)
1780 return -EINVAL;
1781
	/* If err_event_athub occurs, the error injection was successful; however,
	 * the return status from the TA is no longer reliable
	 */
1785 if (amdgpu_ras_intr_triggered())
1786 return 0;
1787
1788 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1789 return -EACCES;
1790 else if (ras_cmd->ras_status)
1791 return -EINVAL;
1792
1793 return 0;
1794}
1795
1796int psp_ras_query_address(struct psp_context *psp,
1797 struct ta_ras_query_address_input *addr_in,
1798 struct ta_ras_query_address_output *addr_out)
1799{
1800 struct ta_ras_shared_memory *ras_cmd;
1801 int ret;
1802
1803 if (!psp->ras_context.context.initialized)
1804 return -EINVAL;
1805
1806 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1807 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1808
1809 ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
1810 ras_cmd->ras_in_message.address = *addr_in;
1811
1812 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1813 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1814 return -EINVAL;
1815
1816 *addr_out = ras_cmd->ras_out_message.address;
1817
1818 return 0;
1819}
1820// ras end
1821
1822// HDCP start
1823static int psp_hdcp_initialize(struct psp_context *psp)
1824{
1825 int ret;
1826
1827 /*
1828 * TODO: bypass the initialize in sriov for now
1829 */
1830 if (amdgpu_sriov_vf(psp->adev))
1831 return 0;
1832
1833 /* bypass hdcp initialization if dmu is harvested */
1834 if (!amdgpu_device_has_display_hardware(psp->adev))
1835 return 0;
1836
1837 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1838 !psp->hdcp_context.context.bin_desc.start_addr) {
1839 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1840 return 0;
1841 }
1842
1843 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1844 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1845
1846 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1847 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1848 if (ret)
1849 return ret;
1850 }
1851
1852 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1853 if (!ret) {
1854 psp->hdcp_context.context.initialized = true;
1855 mutex_init(&psp->hdcp_context.mutex);
1856 }
1857
1858 return ret;
1859}
1860
1861int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1862{
1863 /*
1864 * TODO: bypass the loading in sriov for now
1865 */
1866 if (amdgpu_sriov_vf(psp->adev))
1867 return 0;
1868
1869 if (!psp->hdcp_context.context.initialized)
1870 return 0;
1871
1872 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1873}
1874
1875static int psp_hdcp_terminate(struct psp_context *psp)
1876{
1877 int ret;
1878
1879 /*
1880 * TODO: bypass the terminate in sriov for now
1881 */
1882 if (amdgpu_sriov_vf(psp->adev))
1883 return 0;
1884
1885 if (!psp->hdcp_context.context.initialized)
1886 return 0;
1887
1888 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1889
1890 psp->hdcp_context.context.initialized = false;
1891
1892 return ret;
1893}
1894// HDCP end
1895
1896// DTM start
1897static int psp_dtm_initialize(struct psp_context *psp)
1898{
1899 int ret;
1900
1901 /*
1902 * TODO: bypass the initialize in sriov for now
1903 */
1904 if (amdgpu_sriov_vf(psp->adev))
1905 return 0;
1906
1907 /* bypass dtm initialization if dmu is harvested */
1908 if (!amdgpu_device_has_display_hardware(psp->adev))
1909 return 0;
1910
1911 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1912 !psp->dtm_context.context.bin_desc.start_addr) {
1913 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1914 return 0;
1915 }
1916
1917 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1918 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1919
1920 if (!psp->dtm_context.context.mem_context.shared_buf) {
1921 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1922 if (ret)
1923 return ret;
1924 }
1925
1926 ret = psp_ta_load(psp, &psp->dtm_context.context);
1927 if (!ret) {
1928 psp->dtm_context.context.initialized = true;
1929 mutex_init(&psp->dtm_context.mutex);
1930 }
1931
1932 return ret;
1933}
1934
1935int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1936{
1937 /*
1938 * TODO: bypass the loading in sriov for now
1939 */
1940 if (amdgpu_sriov_vf(psp->adev))
1941 return 0;
1942
1943 if (!psp->dtm_context.context.initialized)
1944 return 0;
1945
1946 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1947}
1948
1949static int psp_dtm_terminate(struct psp_context *psp)
1950{
1951 int ret;
1952
1953 /*
1954 * TODO: bypass the terminate in sriov for now
1955 */
1956 if (amdgpu_sriov_vf(psp->adev))
1957 return 0;
1958
1959 if (!psp->dtm_context.context.initialized)
1960 return 0;
1961
1962 ret = psp_ta_unload(psp, &psp->dtm_context.context);
1963
1964 psp->dtm_context.context.initialized = false;
1965
1966 return ret;
1967}
1968// DTM end
1969
1970// RAP start
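/*
 * Unlike the HDCP/DTM TAs, RAP is validated right after loading: a
 * TA_CMD_RAP__INITIALIZE command is issued and, if it fails, the TA is
 * unloaded again and its shared buffer is freed.
 */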
1971static int psp_rap_initialize(struct psp_context *psp)
1972{
1973 int ret;
1974 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1975
1976 /*
1977 * TODO: bypass the initialize in sriov for now
1978 */
1979 if (amdgpu_sriov_vf(psp->adev))
1980 return 0;
1981
1982 if (!psp->rap_context.context.bin_desc.size_bytes ||
1983 !psp->rap_context.context.bin_desc.start_addr) {
1984 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1985 return 0;
1986 }
1987
1988 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1989 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1990
1991 if (!psp->rap_context.context.mem_context.shared_buf) {
1992 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1993 if (ret)
1994 return ret;
1995 }
1996
1997 ret = psp_ta_load(psp, &psp->rap_context.context);
1998 if (!ret) {
1999 psp->rap_context.context.initialized = true;
2000 mutex_init(&psp->rap_context.mutex);
	} else {
		return ret;
	}
2003
2004 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2005 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2006 psp_rap_terminate(psp);
2007 /* free rap shared memory */
2008 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2009
2010 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2011 ret, status);
2012
2013 return ret;
2014 }
2015
2016 return 0;
2017}
2018
2019static int psp_rap_terminate(struct psp_context *psp)
2020{
2021 int ret;
2022
2023 if (!psp->rap_context.context.initialized)
2024 return 0;
2025
2026 ret = psp_ta_unload(psp, &psp->rap_context.context);
2027
2028 psp->rap_context.context.initialized = false;
2029
2030 return ret;
2031}
2032
2033int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2034{
2035 struct ta_rap_shared_memory *rap_cmd;
2036 int ret = 0;
2037
2038 if (!psp->rap_context.context.initialized)
2039 return 0;
2040
2041 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2042 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2043 return -EINVAL;
2044
2045 mutex_lock(&psp->rap_context.mutex);
2046
2047 rap_cmd = (struct ta_rap_shared_memory *)
2048 psp->rap_context.context.mem_context.shared_buf;
2049 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2050
2051 rap_cmd->cmd_id = ta_cmd_id;
2052 rap_cmd->validation_method_id = METHOD_A;
2053
2054 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2055 if (ret)
2056 goto out_unlock;
2057
2058 if (status)
2059 *status = rap_cmd->rap_status;
2060
2061out_unlock:
2062 mutex_unlock(&psp->rap_context.mutex);
2063
2064 return ret;
2065}
2066// RAP end
2067
2068/* securedisplay start */
2069static int psp_securedisplay_initialize(struct psp_context *psp)
2070{
2071 int ret;
2072 struct ta_securedisplay_cmd *securedisplay_cmd;
2073
2074 /*
2075 * TODO: bypass the initialize in sriov for now
2076 */
2077 if (amdgpu_sriov_vf(psp->adev))
2078 return 0;
2079
2080 /* bypass securedisplay initialization if dmu is harvested */
2081 if (!amdgpu_device_has_display_hardware(psp->adev))
2082 return 0;
2083
2084 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2085 !psp->securedisplay_context.context.bin_desc.start_addr) {
2086 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2087 return 0;
2088 }
2089
2090 psp->securedisplay_context.context.mem_context.shared_mem_size =
2091 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2092 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2093
2094 if (!psp->securedisplay_context.context.initialized) {
2095 ret = psp_ta_init_shared_buf(psp,
2096 &psp->securedisplay_context.context.mem_context);
2097 if (ret)
2098 return ret;
2099 }
2100
2101 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2102 if (!ret) {
2103 psp->securedisplay_context.context.initialized = true;
2104 mutex_init(&psp->securedisplay_context.mutex);
	} else {
		return ret;
	}
2107
2108 mutex_lock(&psp->securedisplay_context.mutex);
2109
2110 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2111 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2112
2113 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2114
2115 mutex_unlock(&psp->securedisplay_context.mutex);
2116
2117 if (ret) {
2118 psp_securedisplay_terminate(psp);
2119 /* free securedisplay shared memory */
2120 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2121 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2122 return -EINVAL;
2123 }
2124
2125 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2126 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2127 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2128 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2129 /* don't try again */
2130 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2131 }
2132
2133 return 0;
2134}
2135
2136static int psp_securedisplay_terminate(struct psp_context *psp)
2137{
2138 int ret;
2139
2140 /*
	 * TODO: bypass the terminate in sriov for now
2142 */
2143 if (amdgpu_sriov_vf(psp->adev))
2144 return 0;
2145
2146 if (!psp->securedisplay_context.context.initialized)
2147 return 0;
2148
2149 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2150
2151 psp->securedisplay_context.context.initialized = false;
2152
2153 return ret;
2154}
2155
2156int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2157{
2158 int ret;
2159
2160 if (!psp->securedisplay_context.context.initialized)
2161 return -EINVAL;
2162
2163 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2164 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2165 return -EINVAL;
2166
2167 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2168
2169 return ret;
2170}
2171/* SECUREDISPLAY end */
2172
2173int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2174{
2175 struct psp_context *psp = &adev->psp;
2176 int ret = 0;
2177
2178 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2179 ret = psp->funcs->wait_for_bootloader(psp);
2180
2181 return ret;
2182}
2183
2184bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2185{
2186 if (psp->funcs &&
2187 psp->funcs->get_ras_capability) {
2188 return psp->funcs->get_ras_capability(psp);
2189 } else {
2190 return false;
2191 }
2192}
2193
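/*
 * Bring the PSP up: on bare metal, stage whichever bootloader components are
 * present in the firmware image (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV,
 * DBG_DRV, RAS_DRV and finally SOS); then create the KM ring and set up and
 * load the TMR. When DF Cstate management is centralized to the PMFW, the
 * SMU firmware is loaded after TMR init and before TMR load.
 */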
2194static int psp_hw_start(struct psp_context *psp)
2195{
2196 struct amdgpu_device *adev = psp->adev;
2197 int ret;
2198
2199 if (!amdgpu_sriov_vf(adev)) {
2200 if ((is_psp_fw_valid(psp->kdb)) &&
2201 (psp->funcs->bootloader_load_kdb != NULL)) {
2202 ret = psp_bootloader_load_kdb(psp);
2203 if (ret) {
2204 dev_err(adev->dev, "PSP load kdb failed!\n");
2205 return ret;
2206 }
2207 }
2208
2209 if ((is_psp_fw_valid(psp->spl)) &&
2210 (psp->funcs->bootloader_load_spl != NULL)) {
2211 ret = psp_bootloader_load_spl(psp);
2212 if (ret) {
2213 dev_err(adev->dev, "PSP load spl failed!\n");
2214 return ret;
2215 }
2216 }
2217
2218 if ((is_psp_fw_valid(psp->sys)) &&
2219 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2220 ret = psp_bootloader_load_sysdrv(psp);
2221 if (ret) {
2222 dev_err(adev->dev, "PSP load sys drv failed!\n");
2223 return ret;
2224 }
2225 }
2226
2227 if ((is_psp_fw_valid(psp->soc_drv)) &&
2228 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2229 ret = psp_bootloader_load_soc_drv(psp);
2230 if (ret) {
2231 dev_err(adev->dev, "PSP load soc drv failed!\n");
2232 return ret;
2233 }
2234 }
2235
2236 if ((is_psp_fw_valid(psp->intf_drv)) &&
2237 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2238 ret = psp_bootloader_load_intf_drv(psp);
2239 if (ret) {
2240 dev_err(adev->dev, "PSP load intf drv failed!\n");
2241 return ret;
2242 }
2243 }
2244
2245 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2246 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2247 ret = psp_bootloader_load_dbg_drv(psp);
2248 if (ret) {
2249 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2250 return ret;
2251 }
2252 }
2253
2254 if ((is_psp_fw_valid(psp->ras_drv)) &&
2255 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2256 ret = psp_bootloader_load_ras_drv(psp);
2257 if (ret) {
2258 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2259 return ret;
2260 }
2261 }
2262
2263 if ((is_psp_fw_valid(psp->sos)) &&
2264 (psp->funcs->bootloader_load_sos != NULL)) {
2265 ret = psp_bootloader_load_sos(psp);
2266 if (ret) {
2267 dev_err(adev->dev, "PSP load sos failed!\n");
2268 return ret;
2269 }
2270 }
2271 }
2272
2273 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2274 if (ret) {
2275 dev_err(adev->dev, "PSP create ring failed!\n");
2276 return ret;
2277 }
2278
2279 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2280 goto skip_pin_bo;
2281
2282 if (!psp->boot_time_tmr || psp->autoload_supported) {
2283 ret = psp_tmr_init(psp);
2284 if (ret) {
2285 dev_err(adev->dev, "PSP tmr init failed!\n");
2286 return ret;
2287 }
2288 }
2289
2290skip_pin_bo:
	/*
	 * For ASICs with DF Cstate management centralized to the PMFW, TMR
	 * setup must be performed after the PMFW is loaded and before any
	 * other non-PSP firmware is loaded.
	 */
2296 if (psp->pmfw_centralized_cstate_management) {
2297 ret = psp_load_smu_fw(psp);
2298 if (ret)
2299 return ret;
2300 }
2301
2302 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2303 ret = psp_tmr_load(psp);
2304 if (ret) {
2305 dev_err(adev->dev, "PSP load tmr failed!\n");
2306 return ret;
2307 }
2308 }
2309
2310 return 0;
2311}
2312
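/*
 * Translate an amdgpu ucode ID into the PSP GFX firmware type expected by
 * the LOAD_IP_FW command. Returns -EINVAL for IDs the PSP cannot load.
 */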
2313static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2314 enum psp_gfx_fw_type *type)
2315{
2316 switch (ucode->ucode_id) {
2317 case AMDGPU_UCODE_ID_CAP:
2318 *type = GFX_FW_TYPE_CAP;
2319 break;
2320 case AMDGPU_UCODE_ID_SDMA0:
2321 *type = GFX_FW_TYPE_SDMA0;
2322 break;
2323 case AMDGPU_UCODE_ID_SDMA1:
2324 *type = GFX_FW_TYPE_SDMA1;
2325 break;
2326 case AMDGPU_UCODE_ID_SDMA2:
2327 *type = GFX_FW_TYPE_SDMA2;
2328 break;
2329 case AMDGPU_UCODE_ID_SDMA3:
2330 *type = GFX_FW_TYPE_SDMA3;
2331 break;
2332 case AMDGPU_UCODE_ID_SDMA4:
2333 *type = GFX_FW_TYPE_SDMA4;
2334 break;
2335 case AMDGPU_UCODE_ID_SDMA5:
2336 *type = GFX_FW_TYPE_SDMA5;
2337 break;
2338 case AMDGPU_UCODE_ID_SDMA6:
2339 *type = GFX_FW_TYPE_SDMA6;
2340 break;
2341 case AMDGPU_UCODE_ID_SDMA7:
2342 *type = GFX_FW_TYPE_SDMA7;
2343 break;
2344 case AMDGPU_UCODE_ID_CP_MES:
2345 *type = GFX_FW_TYPE_CP_MES;
2346 break;
2347 case AMDGPU_UCODE_ID_CP_MES_DATA:
2348 *type = GFX_FW_TYPE_MES_STACK;
2349 break;
2350 case AMDGPU_UCODE_ID_CP_MES1:
2351 *type = GFX_FW_TYPE_CP_MES_KIQ;
2352 break;
2353 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2354 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2355 break;
2356 case AMDGPU_UCODE_ID_CP_CE:
2357 *type = GFX_FW_TYPE_CP_CE;
2358 break;
2359 case AMDGPU_UCODE_ID_CP_PFP:
2360 *type = GFX_FW_TYPE_CP_PFP;
2361 break;
2362 case AMDGPU_UCODE_ID_CP_ME:
2363 *type = GFX_FW_TYPE_CP_ME;
2364 break;
2365 case AMDGPU_UCODE_ID_CP_MEC1:
2366 *type = GFX_FW_TYPE_CP_MEC;
2367 break;
2368 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2369 *type = GFX_FW_TYPE_CP_MEC_ME1;
2370 break;
2371 case AMDGPU_UCODE_ID_CP_MEC2:
2372 *type = GFX_FW_TYPE_CP_MEC;
2373 break;
2374 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2375 *type = GFX_FW_TYPE_CP_MEC_ME2;
2376 break;
2377 case AMDGPU_UCODE_ID_RLC_P:
2378 *type = GFX_FW_TYPE_RLC_P;
2379 break;
2380 case AMDGPU_UCODE_ID_RLC_V:
2381 *type = GFX_FW_TYPE_RLC_V;
2382 break;
2383 case AMDGPU_UCODE_ID_RLC_G:
2384 *type = GFX_FW_TYPE_RLC_G;
2385 break;
2386 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2387 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2388 break;
2389 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2390 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2391 break;
2392 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2393 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2394 break;
2395 case AMDGPU_UCODE_ID_RLC_IRAM:
2396 *type = GFX_FW_TYPE_RLC_IRAM;
2397 break;
2398 case AMDGPU_UCODE_ID_RLC_DRAM:
2399 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2400 break;
2401 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2402 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2403 break;
2404 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2405 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2406 break;
2407 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2408 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2409 break;
2410 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2411 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2412 break;
2413 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2414 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2415 break;
2416 case AMDGPU_UCODE_ID_SMC:
2417 *type = GFX_FW_TYPE_SMU;
2418 break;
2419 case AMDGPU_UCODE_ID_PPTABLE:
2420 *type = GFX_FW_TYPE_PPTABLE;
2421 break;
2422 case AMDGPU_UCODE_ID_UVD:
2423 *type = GFX_FW_TYPE_UVD;
2424 break;
2425 case AMDGPU_UCODE_ID_UVD1:
2426 *type = GFX_FW_TYPE_UVD1;
2427 break;
2428 case AMDGPU_UCODE_ID_VCE:
2429 *type = GFX_FW_TYPE_VCE;
2430 break;
2431 case AMDGPU_UCODE_ID_VCN:
2432 *type = GFX_FW_TYPE_VCN;
2433 break;
2434 case AMDGPU_UCODE_ID_VCN1:
2435 *type = GFX_FW_TYPE_VCN1;
2436 break;
2437 case AMDGPU_UCODE_ID_DMCU_ERAM:
2438 *type = GFX_FW_TYPE_DMCU_ERAM;
2439 break;
2440 case AMDGPU_UCODE_ID_DMCU_INTV:
2441 *type = GFX_FW_TYPE_DMCU_ISR;
2442 break;
2443 case AMDGPU_UCODE_ID_VCN0_RAM:
2444 *type = GFX_FW_TYPE_VCN0_RAM;
2445 break;
2446 case AMDGPU_UCODE_ID_VCN1_RAM:
2447 *type = GFX_FW_TYPE_VCN1_RAM;
2448 break;
2449 case AMDGPU_UCODE_ID_DMCUB:
2450 *type = GFX_FW_TYPE_DMUB;
2451 break;
2452 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2453 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2454 break;
2455 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2456 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2457 break;
2458 case AMDGPU_UCODE_ID_IMU_I:
2459 *type = GFX_FW_TYPE_IMU_I;
2460 break;
2461 case AMDGPU_UCODE_ID_IMU_D:
2462 *type = GFX_FW_TYPE_IMU_D;
2463 break;
2464 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2465 *type = GFX_FW_TYPE_RS64_PFP;
2466 break;
2467 case AMDGPU_UCODE_ID_CP_RS64_ME:
2468 *type = GFX_FW_TYPE_RS64_ME;
2469 break;
2470 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2471 *type = GFX_FW_TYPE_RS64_MEC;
2472 break;
2473 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2474 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2475 break;
2476 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2477 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2478 break;
2479 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2480 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2481 break;
2482 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2483 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2484 break;
2485 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2486 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2487 break;
2488 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2489 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2490 break;
2491 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2492 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2493 break;
2494 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2495 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2496 break;
2497 case AMDGPU_UCODE_ID_VPE_CTX:
2498 *type = GFX_FW_TYPE_VPEC_FW1;
2499 break;
2500 case AMDGPU_UCODE_ID_VPE_CTL:
2501 *type = GFX_FW_TYPE_VPEC_FW2;
2502 break;
2503 case AMDGPU_UCODE_ID_VPE:
2504 *type = GFX_FW_TYPE_VPE;
2505 break;
2506 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2507 *type = GFX_FW_TYPE_UMSCH_UCODE;
2508 break;
2509 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2510 *type = GFX_FW_TYPE_UMSCH_DATA;
2511 break;
2512 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2513 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2514 break;
2515 case AMDGPU_UCODE_ID_P2S_TABLE:
2516 *type = GFX_FW_TYPE_P2S_TABLE;
2517 break;
2518 case AMDGPU_UCODE_ID_JPEG_RAM:
2519 *type = GFX_FW_TYPE_JPEG_RAM;
2520 break;
2521 case AMDGPU_UCODE_ID_MAXIMUM:
2522 default:
2523 return -EINVAL;
2524 }
2525
2526 return 0;
2527}
2528
2529static void psp_print_fw_hdr(struct psp_context *psp,
2530 struct amdgpu_firmware_info *ucode)
2531{
2532 struct amdgpu_device *adev = psp->adev;
2533 struct common_firmware_header *hdr;
2534
2535 switch (ucode->ucode_id) {
2536 case AMDGPU_UCODE_ID_SDMA0:
2537 case AMDGPU_UCODE_ID_SDMA1:
2538 case AMDGPU_UCODE_ID_SDMA2:
2539 case AMDGPU_UCODE_ID_SDMA3:
2540 case AMDGPU_UCODE_ID_SDMA4:
2541 case AMDGPU_UCODE_ID_SDMA5:
2542 case AMDGPU_UCODE_ID_SDMA6:
2543 case AMDGPU_UCODE_ID_SDMA7:
2544 hdr = (struct common_firmware_header *)
2545 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2546 amdgpu_ucode_print_sdma_hdr(hdr);
2547 break;
2548 case AMDGPU_UCODE_ID_CP_CE:
2549 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2550 amdgpu_ucode_print_gfx_hdr(hdr);
2551 break;
2552 case AMDGPU_UCODE_ID_CP_PFP:
2553 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2554 amdgpu_ucode_print_gfx_hdr(hdr);
2555 break;
2556 case AMDGPU_UCODE_ID_CP_ME:
2557 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2558 amdgpu_ucode_print_gfx_hdr(hdr);
2559 break;
2560 case AMDGPU_UCODE_ID_CP_MEC1:
2561 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2562 amdgpu_ucode_print_gfx_hdr(hdr);
2563 break;
2564 case AMDGPU_UCODE_ID_RLC_G:
2565 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2566 amdgpu_ucode_print_rlc_hdr(hdr);
2567 break;
2568 case AMDGPU_UCODE_ID_SMC:
2569 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2570 amdgpu_ucode_print_smc_hdr(hdr);
2571 break;
2572 default:
2573 break;
2574 }
2575}
2576
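/*
 * Fill a GFX_CMD_ID_LOAD_IP_FW command with the firmware's MC address, size
 * and PSP firmware type.
 */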
2577static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2578 struct amdgpu_firmware_info *ucode,
2579 struct psp_gfx_cmd_resp *cmd)
2580{
2581 int ret;
2582 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2583
2584 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2585 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2586 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2587 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2588
2589 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2590 if (ret)
2591 dev_err(psp->adev->dev, "Unknown firmware type\n");
2592
2593 return ret;
2594}
2595
2596int psp_execute_ip_fw_load(struct psp_context *psp,
2597 struct amdgpu_firmware_info *ucode)
2598{
2599 int ret = 0;
2600 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2601
2602 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2603 if (!ret) {
2604 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2605 psp->fence_buf_mc_addr);
2606 }
2607
2608 release_psp_cmd_buf(psp);
2609
2610 return ret;
2611}
2612
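/*
 * Load the P2S table through the PSP. Skipped when BACO is used for runtime
 * PM, under SR-IOV, when no P2S firmware is present, and on MP0 13.0.6 when
 * the SOS firmware is older than the minimum version that supports it.
 */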
2613static int psp_load_p2s_table(struct psp_context *psp)
2614{
2615 int ret;
2616 struct amdgpu_device *adev = psp->adev;
2617 struct amdgpu_firmware_info *ucode =
2618 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2619
2620 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2621 return 0;
2622
2623 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2624 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2625 0x0036003C;
2626 if (psp->sos.fw_version < supp_vers)
2627 return 0;
2628 }
2629
2630 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2631 return 0;
2632
2633 ret = psp_execute_ip_fw_load(psp, ucode);
2634
2635 return ret;
2636}
2637
2638static int psp_load_smu_fw(struct psp_context *psp)
2639{
2640 int ret;
2641 struct amdgpu_device *adev = psp->adev;
2642 struct amdgpu_firmware_info *ucode =
2643 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2644 struct amdgpu_ras *ras = psp->ras_context.ras;
2645
	/*
	 * Skip SMU FW reloading when BACO is used for runtime PM, as the
	 * SMU remains alive across it.
	 */
2650 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2651 return 0;
2652
2653 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2654 return 0;
2655
2656 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2657 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2658 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2659 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2660 if (ret)
2661 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2662 }
2663
2664 ret = psp_execute_ip_fw_load(psp, ucode);
2665
2666 if (ret)
2667 dev_err(adev->dev, "PSP load smu failed!\n");
2668
2669 return ret;
2670}
2671
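/*
 * Return true for firmware entries the PSP should not load here: empty
 * entries, the P2S table (loaded separately), SMC when it is autoloaded,
 * quirked or managed through PMFW-centralized cstate handling, firmwares
 * skipped under SR-IOV, and the MEC JT images when autoload is enabled.
 */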
2672static bool fw_load_skip_check(struct psp_context *psp,
2673 struct amdgpu_firmware_info *ucode)
2674{
2675 if (!ucode->fw || !ucode->ucode_size)
2676 return true;
2677
2678 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2679 return true;
2680
2681 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2682 (psp_smu_reload_quirk(psp) ||
2683 psp->autoload_supported ||
2684 psp->pmfw_centralized_cstate_management))
2685 return true;
2686
2687 if (amdgpu_sriov_vf(psp->adev) &&
2688 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2689 return true;
2690
2691 if (psp->autoload_supported &&
2692 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2693 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2694 /* skip mec JT when autoload is enabled */
2695 return true;
2696
2697 return false;
2698}
2699
2700int psp_load_fw_list(struct psp_context *psp,
2701 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2702{
2703 int ret = 0, i;
2704 struct amdgpu_firmware_info *ucode;
2705
2706 for (i = 0; i < ucode_count; ++i) {
2707 ucode = ucode_list[i];
2708 psp_print_fw_hdr(psp, ucode);
2709 ret = psp_execute_ip_fw_load(psp, ucode);
2710 if (ret)
2711 return ret;
2712 }
2713 return ret;
2714}
2715
2716static int psp_load_non_psp_fw(struct psp_context *psp)
2717{
2718 int i, ret;
2719 struct amdgpu_firmware_info *ucode;
2720 struct amdgpu_device *adev = psp->adev;
2721
2722 if (psp->autoload_supported &&
2723 !psp->pmfw_centralized_cstate_management) {
2724 ret = psp_load_smu_fw(psp);
2725 if (ret)
2726 return ret;
2727 }
2728
2729 /* Load P2S table first if it's available */
2730 psp_load_p2s_table(psp);
2731
2732 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2733 ucode = &adev->firmware.ucode[i];
2734
2735 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2736 !fw_load_skip_check(psp, ucode)) {
2737 ret = psp_load_smu_fw(psp);
2738 if (ret)
2739 return ret;
2740 continue;
2741 }
2742
2743 if (fw_load_skip_check(psp, ucode))
2744 continue;
2745
2746 if (psp->autoload_supported &&
2747 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2748 IP_VERSION(11, 0, 7) ||
2749 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2750 IP_VERSION(11, 0, 11) ||
2751 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2752 IP_VERSION(11, 0, 12)) &&
2753 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2754 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2755 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA firmware for sienna_cichlid,
			 * as all four SDMA firmwares are identical
			 */
2759 continue;
2760
2761 psp_print_fw_hdr(psp, ucode);
2762
2763 ret = psp_execute_ip_fw_load(psp, ucode);
2764 if (ret)
2765 return ret;
2766
		/* Start RLC autoload after the PSP has received all the GFX firmware */
2768 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2769 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2770 ret = psp_rlc_autoload_start(psp);
2771 if (ret) {
2772 dev_err(adev->dev, "Failed to start rlc autoload\n");
2773 return ret;
2774 }
2775 }
2776 }
2777
2778 return 0;
2779}
2780
2781static int psp_load_fw(struct amdgpu_device *adev)
2782{
2783 int ret;
2784 struct psp_context *psp = &adev->psp;
2785
2786 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2787 /* should not destroy ring, only stop */
2788 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2789 } else {
2790 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2791
2792 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2793 if (ret) {
2794 dev_err(adev->dev, "PSP ring init failed!\n");
2795 goto failed;
2796 }
2797 }
2798
2799 ret = psp_hw_start(psp);
2800 if (ret)
2801 goto failed;
2802
2803 ret = psp_load_non_psp_fw(psp);
2804 if (ret)
2805 goto failed1;
2806
2807 ret = psp_asd_initialize(psp);
2808 if (ret) {
2809 dev_err(adev->dev, "PSP load asd failed!\n");
2810 goto failed1;
2811 }
2812
2813 ret = psp_rl_load(adev);
2814 if (ret) {
2815 dev_err(adev->dev, "PSP load RL failed!\n");
2816 goto failed1;
2817 }
2818
2819 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2820 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2821 ret = psp_xgmi_initialize(psp, false, true);
			/* Warn on XGMI session initialization failure
			 * instead of stopping driver initialization
			 */
2825 if (ret)
2826 dev_err(psp->adev->dev,
2827 "XGMI: Failed to initialize XGMI session\n");
2828 }
2829 }
2830
2831 if (psp->ta_fw) {
2832 ret = psp_ras_initialize(psp);
2833 if (ret)
2834 dev_err(psp->adev->dev,
2835 "RAS: Failed to initialize RAS\n");
2836
2837 ret = psp_hdcp_initialize(psp);
2838 if (ret)
2839 dev_err(psp->adev->dev,
2840 "HDCP: Failed to initialize HDCP\n");
2841
2842 ret = psp_dtm_initialize(psp);
2843 if (ret)
2844 dev_err(psp->adev->dev,
2845 "DTM: Failed to initialize DTM\n");
2846
2847 ret = psp_rap_initialize(psp);
2848 if (ret)
2849 dev_err(psp->adev->dev,
2850 "RAP: Failed to initialize RAP\n");
2851
2852 ret = psp_securedisplay_initialize(psp);
2853 if (ret)
2854 dev_err(psp->adev->dev,
2855 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2856 }
2857
2858 return 0;
2859
2860failed1:
2861 psp_free_shared_bufs(psp);
2862failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate, ring destroy,
	 * cmd/fence/fw buffer destroy, psp->cmd destroy) are deferred to
	 * psp_hw_fini().
	 */
2868 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2869 return ret;
2870}
2871
2872static int psp_hw_init(void *handle)
2873{
2874 int ret;
2875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2876
2877 mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only run once, during hw_init; it is not needed
	 * on resume.
	 */
2882 ret = amdgpu_ucode_init_bo(adev);
2883 if (ret)
2884 goto failed;
2885
2886 ret = psp_load_fw(adev);
2887 if (ret) {
2888 dev_err(adev->dev, "PSP firmware loading failed\n");
2889 goto failed;
2890 }
2891
2892 mutex_unlock(&adev->firmware.mutex);
2893 return 0;
2894
2895failed:
2896 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2897 mutex_unlock(&adev->firmware.mutex);
2898 return -EINVAL;
2899}
2900
2901static int psp_hw_fini(void *handle)
2902{
2903 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2904 struct psp_context *psp = &adev->psp;
2905
2906 if (psp->ta_fw) {
2907 psp_ras_terminate(psp);
2908 psp_securedisplay_terminate(psp);
2909 psp_rap_terminate(psp);
2910 psp_dtm_terminate(psp);
2911 psp_hdcp_terminate(psp);
2912
2913 if (adev->gmc.xgmi.num_physical_nodes > 1)
2914 psp_xgmi_terminate(psp);
2915 }
2916
2917 psp_asd_terminate(psp);
2918 psp_tmr_terminate(psp);
2919
2920 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2921
2922 return 0;
2923}
2924
2925static int psp_suspend(void *handle)
2926{
2927 int ret = 0;
2928 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2929 struct psp_context *psp = &adev->psp;
2930
2931 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2932 psp->xgmi_context.context.initialized) {
2933 ret = psp_xgmi_terminate(psp);
2934 if (ret) {
2935 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
2936 goto out;
2937 }
2938 }
2939
2940 if (psp->ta_fw) {
2941 ret = psp_ras_terminate(psp);
2942 if (ret) {
2943 dev_err(adev->dev, "Failed to terminate ras ta\n");
2944 goto out;
2945 }
2946 ret = psp_hdcp_terminate(psp);
2947 if (ret) {
2948 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
2949 goto out;
2950 }
2951 ret = psp_dtm_terminate(psp);
2952 if (ret) {
2953 dev_err(adev->dev, "Failed to terminate dtm ta\n");
2954 goto out;
2955 }
2956 ret = psp_rap_terminate(psp);
2957 if (ret) {
2958 dev_err(adev->dev, "Failed to terminate rap ta\n");
2959 goto out;
2960 }
2961 ret = psp_securedisplay_terminate(psp);
2962 if (ret) {
2963 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
2964 goto out;
2965 }
2966 }
2967
2968 ret = psp_asd_terminate(psp);
2969 if (ret) {
2970 dev_err(adev->dev, "Failed to terminate asd\n");
2971 goto out;
2972 }
2973
2974 ret = psp_tmr_terminate(psp);
2975 if (ret) {
2976 dev_err(adev->dev, "Failed to terminate tmr\n");
2977 goto out;
2978 }
2979
2980 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2981 if (ret)
2982 dev_err(adev->dev, "PSP ring stop failed\n");
2983
2984out:
2985 return ret;
2986}
2987
2988static int psp_resume(void *handle)
2989{
2990 int ret;
2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992 struct psp_context *psp = &adev->psp;
2993
2994 dev_info(adev->dev, "PSP is resuming...\n");
2995
2996 if (psp->mem_train_ctx.enable_mem_training) {
2997 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2998 if (ret) {
2999 dev_err(adev->dev, "Failed to process memory training!\n");
3000 return ret;
3001 }
3002 }
3003
3004 mutex_lock(&adev->firmware.mutex);
3005
3006 ret = psp_hw_start(psp);
3007 if (ret)
3008 goto failed;
3009
3010 ret = psp_load_non_psp_fw(psp);
3011 if (ret)
3012 goto failed;
3013
3014 ret = psp_asd_initialize(psp);
3015 if (ret) {
3016 dev_err(adev->dev, "PSP load asd failed!\n");
3017 goto failed;
3018 }
3019
3020 ret = psp_rl_load(adev);
3021 if (ret) {
3022 dev_err(adev->dev, "PSP load RL failed!\n");
3023 goto failed;
3024 }
3025
3026 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3027 ret = psp_xgmi_initialize(psp, false, true);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization
		 */
3031 if (ret)
3032 dev_err(psp->adev->dev,
3033 "XGMI: Failed to initialize XGMI session\n");
3034 }
3035
3036 if (psp->ta_fw) {
3037 ret = psp_ras_initialize(psp);
3038 if (ret)
3039 dev_err(psp->adev->dev,
3040 "RAS: Failed to initialize RAS\n");
3041
3042 ret = psp_hdcp_initialize(psp);
3043 if (ret)
3044 dev_err(psp->adev->dev,
3045 "HDCP: Failed to initialize HDCP\n");
3046
3047 ret = psp_dtm_initialize(psp);
3048 if (ret)
3049 dev_err(psp->adev->dev,
3050 "DTM: Failed to initialize DTM\n");
3051
3052 ret = psp_rap_initialize(psp);
3053 if (ret)
3054 dev_err(psp->adev->dev,
3055 "RAP: Failed to initialize RAP\n");
3056
3057 ret = psp_securedisplay_initialize(psp);
3058 if (ret)
3059 dev_err(psp->adev->dev,
3060 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3061 }
3062
3063 mutex_unlock(&adev->firmware.mutex);
3064
3065 return 0;
3066
3067failed:
3068 dev_err(adev->dev, "PSP resume failed\n");
3069 mutex_unlock(&adev->firmware.mutex);
3070 return ret;
3071}
3072
3073int psp_gpu_reset(struct amdgpu_device *adev)
3074{
3075 int ret;
3076
3077 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3078 return 0;
3079
3080 mutex_lock(&adev->psp.mutex);
3081 ret = psp_mode1_reset(&adev->psp);
3082 mutex_unlock(&adev->psp.mutex);
3083
3084 return ret;
3085}
3086
3087int psp_rlc_autoload_start(struct psp_context *psp)
3088{
3089 int ret;
3090 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3091
3092 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3093
3094 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3095 psp->fence_buf_mc_addr);
3096
3097 release_psp_cmd_buf(psp);
3098
3099 return ret;
3100}
3101
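/*
 * Submit one command buffer to the KM (GPCOM) ring: pick the next ring-buffer
 * frame from the current write pointer, validate that it lies within the
 * ring, fill in the command and fence addresses, flush HDP and advance the
 * write pointer (in DWORDs).
 */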
3102int psp_ring_cmd_submit(struct psp_context *psp,
3103 uint64_t cmd_buf_mc_addr,
3104 uint64_t fence_mc_addr,
3105 int index)
3106{
3107 unsigned int psp_write_ptr_reg = 0;
3108 struct psp_gfx_rb_frame *write_frame;
3109 struct psp_ring *ring = &psp->km_ring;
3110 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3111 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3112 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3113 struct amdgpu_device *adev = psp->adev;
3114 uint32_t ring_size_dw = ring->ring_size / 4;
3115 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3116
3117 /* KM (GPCOM) prepare write pointer */
3118 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3119
3120 /* Update KM RB frame pointer to new frame */
3121 /* write_frame ptr increments by size of rb_frame in bytes */
3122 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3123 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3124 write_frame = ring_buffer_start;
3125 else
3126 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3127 /* Check invalid write_frame ptr address */
3128 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3129 dev_err(adev->dev,
3130 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3131 ring_buffer_start, ring_buffer_end, write_frame);
3132 dev_err(adev->dev,
3133 "write_frame is pointing to address out of bounds\n");
3134 return -EINVAL;
3135 }
3136
3137 /* Initialize KM RB frame */
3138 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3139
3140 /* Update KM RB frame */
3141 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3142 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3143 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3144 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3145 write_frame->fence_value = index;
3146 amdgpu_device_flush_hdp(adev, NULL);
3147
3148 /* Update the write Pointer in DWORDs */
3149 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3150 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3151 return 0;
3152}
3153
3154int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3155{
3156 struct amdgpu_device *adev = psp->adev;
3157 char fw_name[PSP_FW_NAME_LEN];
3158 const struct psp_firmware_header_v1_0 *asd_hdr;
3159 int err = 0;
3160
3161 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3162 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3163 if (err)
3164 goto out;
3165
3166 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3167 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3168 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3169 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3170 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3171 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3172 return 0;
3173out:
3174 amdgpu_ucode_release(&adev->psp.asd_fw);
3175 return err;
3176}
3177
3178int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3179{
3180 struct amdgpu_device *adev = psp->adev;
3181 char fw_name[PSP_FW_NAME_LEN];
3182 const struct psp_firmware_header_v1_0 *toc_hdr;
3183 int err = 0;
3184
3185 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3186 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3187 if (err)
3188 goto out;
3189
3190 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3191 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3192 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3193 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3194 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3195 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3196 return 0;
3197out:
3198 amdgpu_ucode_release(&adev->psp.toc_fw);
3199 return err;
3200}
3201
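/*
 * Copy one binary descriptor from a packed (v2 header) SOS image into the
 * matching psp_bin_desc in the PSP context.
 */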
3202static int parse_sos_bin_descriptor(struct psp_context *psp,
3203 const struct psp_fw_bin_desc *desc,
3204 const struct psp_firmware_header_v2_0 *sos_hdr)
3205{
3206 uint8_t *ucode_start_addr = NULL;
3207
3208 if (!psp || !desc || !sos_hdr)
3209 return -EINVAL;
3210
3211 ucode_start_addr = (uint8_t *)sos_hdr +
3212 le32_to_cpu(desc->offset_bytes) +
3213 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3214
3215 switch (desc->fw_type) {
3216 case PSP_FW_TYPE_PSP_SOS:
3217 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3218 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3219 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3220 psp->sos.start_addr = ucode_start_addr;
3221 break;
3222 case PSP_FW_TYPE_PSP_SYS_DRV:
3223 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3224 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3225 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3226 psp->sys.start_addr = ucode_start_addr;
3227 break;
3228 case PSP_FW_TYPE_PSP_KDB:
3229 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3230 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3231 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3232 psp->kdb.start_addr = ucode_start_addr;
3233 break;
3234 case PSP_FW_TYPE_PSP_TOC:
3235 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3236 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3237 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3238 psp->toc.start_addr = ucode_start_addr;
3239 break;
3240 case PSP_FW_TYPE_PSP_SPL:
3241 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3242 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3243 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3244 psp->spl.start_addr = ucode_start_addr;
3245 break;
3246 case PSP_FW_TYPE_PSP_RL:
3247 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3248 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3249 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3250 psp->rl.start_addr = ucode_start_addr;
3251 break;
3252 case PSP_FW_TYPE_PSP_SOC_DRV:
3253 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3254 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3255 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3256 psp->soc_drv.start_addr = ucode_start_addr;
3257 break;
3258 case PSP_FW_TYPE_PSP_INTF_DRV:
3259 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3260 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3261 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3262 psp->intf_drv.start_addr = ucode_start_addr;
3263 break;
3264 case PSP_FW_TYPE_PSP_DBG_DRV:
3265 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3266 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3267 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3268 psp->dbg_drv.start_addr = ucode_start_addr;
3269 break;
3270 case PSP_FW_TYPE_PSP_RAS_DRV:
3271 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3272 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3273 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3274 psp->ras_drv.start_addr = ucode_start_addr;
3275 break;
3276 default:
3277 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3278 break;
3279 }
3280
3281 return 0;
3282}
3283
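/*
 * Initialize the base SYS_DRV/SOS descriptors from a v1 SOS header. For
 * MP0 13.0.2 parts that are not connected to the CPU, the alternate (aux)
 * SYS_DRV/SOS images from the v1.3 header are used instead.
 */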
3284static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3285{
3286 const struct psp_firmware_header_v1_0 *sos_hdr;
3287 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3288 uint8_t *ucode_array_start_addr;
3289
3290 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3291 ucode_array_start_addr = (uint8_t *)sos_hdr +
3292 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3293
3294 if (adev->gmc.xgmi.connected_to_cpu ||
3295 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3296 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3297 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3298
3299 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3300 adev->psp.sys.start_addr = ucode_array_start_addr;
3301
3302 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3303 adev->psp.sos.start_addr = ucode_array_start_addr +
3304 le32_to_cpu(sos_hdr->sos.offset_bytes);
3305 } else {
3306 /* Load alternate PSP SOS FW */
3307 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3308
3309 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3310 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3311
3312 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3313 adev->psp.sys.start_addr = ucode_array_start_addr +
3314 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3315
3316 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3317 adev->psp.sos.start_addr = ucode_array_start_addr +
3318 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3319 }
3320
3321 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3322 dev_warn(adev->dev, "PSP SOS FW not available");
3323 return -EINVAL;
3324 }
3325
3326 return 0;
3327}
3328
3329int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3330{
3331 struct amdgpu_device *adev = psp->adev;
3332 char fw_name[PSP_FW_NAME_LEN];
3333 const struct psp_firmware_header_v1_0 *sos_hdr;
3334 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3335 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3336 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3337 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3338 int err = 0;
3339 uint8_t *ucode_array_start_addr;
3340 int fw_index = 0;
3341
3342 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3343 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3344 if (err)
3345 goto out;
3346
3347 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3348 ucode_array_start_addr = (uint8_t *)sos_hdr +
3349 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3350 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3351
3352 switch (sos_hdr->header.header_version_major) {
3353 case 1:
3354 err = psp_init_sos_base_fw(adev);
3355 if (err)
3356 goto out;
3357
3358 if (sos_hdr->header.header_version_minor == 1) {
3359 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3360 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3361 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3362 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3363 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3364 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3365 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3366 }
3367 if (sos_hdr->header.header_version_minor == 2) {
3368 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3369 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3370 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3371 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3372 }
3373 if (sos_hdr->header.header_version_minor == 3) {
3374 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3375 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3376 adev->psp.toc.start_addr = ucode_array_start_addr +
3377 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3378 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3379 adev->psp.kdb.start_addr = ucode_array_start_addr +
3380 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3381 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3382 adev->psp.spl.start_addr = ucode_array_start_addr +
3383 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3384 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3385 adev->psp.rl.start_addr = ucode_array_start_addr +
3386 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3387 }
3388 break;
3389 case 2:
3390 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3391
3392 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3393 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3394 err = -EINVAL;
3395 goto out;
3396 }
3397
3398 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3399 err = parse_sos_bin_descriptor(psp,
3400 &sos_hdr_v2_0->psp_fw_bin[fw_index],
3401 sos_hdr_v2_0);
3402 if (err)
3403 goto out;
3404 }
3405 break;
3406 default:
3407 dev_err(adev->dev,
3408 "unsupported psp sos firmware\n");
3409 err = -EINVAL;
3410 goto out;
3411 }
3412
3413 return 0;
3414out:
3415 amdgpu_ucode_release(&adev->psp.sos_fw);
3416
3417 return err;
3418}
3419
3420static int parse_ta_bin_descriptor(struct psp_context *psp,
3421 const struct psp_fw_bin_desc *desc,
3422 const struct ta_firmware_header_v2_0 *ta_hdr)
3423{
3424 uint8_t *ucode_start_addr = NULL;
3425
3426 if (!psp || !desc || !ta_hdr)
3427 return -EINVAL;
3428
3429 ucode_start_addr = (uint8_t *)ta_hdr +
3430 le32_to_cpu(desc->offset_bytes) +
3431 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3432
3433 switch (desc->fw_type) {
3434 case TA_FW_TYPE_PSP_ASD:
3435 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3436 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3437 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3438 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3439 break;
3440 case TA_FW_TYPE_PSP_XGMI:
3441 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3442 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3443 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3444 break;
3445 case TA_FW_TYPE_PSP_RAS:
3446 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3447 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3448 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3449 break;
3450 case TA_FW_TYPE_PSP_HDCP:
3451 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3452 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3453 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3454 break;
3455 case TA_FW_TYPE_PSP_DTM:
3456 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3457 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3458 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3459 break;
3460 case TA_FW_TYPE_PSP_RAP:
3461 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3462 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3463 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3464 break;
3465 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3466 psp->securedisplay_context.context.bin_desc.fw_version =
3467 le32_to_cpu(desc->fw_version);
3468 psp->securedisplay_context.context.bin_desc.size_bytes =
3469 le32_to_cpu(desc->size_bytes);
3470 psp->securedisplay_context.context.bin_desc.start_addr =
3471 ucode_start_addr;
3472 break;
3473 default:
3474 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3475 break;
3476 }
3477
3478 return 0;
3479}
3480
3481static int parse_ta_v1_microcode(struct psp_context *psp)
3482{
3483 const struct ta_firmware_header_v1_0 *ta_hdr;
3484 struct amdgpu_device *adev = psp->adev;
3485
3486 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3487
3488 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3489 return -EINVAL;
3490
3491 adev->psp.xgmi_context.context.bin_desc.fw_version =
3492 le32_to_cpu(ta_hdr->xgmi.fw_version);
3493 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3494 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3495 adev->psp.xgmi_context.context.bin_desc.start_addr =
3496 (uint8_t *)ta_hdr +
3497 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3498
3499 adev->psp.ras_context.context.bin_desc.fw_version =
3500 le32_to_cpu(ta_hdr->ras.fw_version);
3501 adev->psp.ras_context.context.bin_desc.size_bytes =
3502 le32_to_cpu(ta_hdr->ras.size_bytes);
3503 adev->psp.ras_context.context.bin_desc.start_addr =
3504 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3505 le32_to_cpu(ta_hdr->ras.offset_bytes);
3506
3507 adev->psp.hdcp_context.context.bin_desc.fw_version =
3508 le32_to_cpu(ta_hdr->hdcp.fw_version);
3509 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3510 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3511 adev->psp.hdcp_context.context.bin_desc.start_addr =
3512 (uint8_t *)ta_hdr +
3513 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3514
3515 adev->psp.dtm_context.context.bin_desc.fw_version =
3516 le32_to_cpu(ta_hdr->dtm.fw_version);
3517 adev->psp.dtm_context.context.bin_desc.size_bytes =
3518 le32_to_cpu(ta_hdr->dtm.size_bytes);
3519 adev->psp.dtm_context.context.bin_desc.start_addr =
3520 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3521 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3522
3523 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3524 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3525 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3526 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3527 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3528 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3529 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3530
3531 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3532
3533 return 0;
3534}
3535
3536static int parse_ta_v2_microcode(struct psp_context *psp)
3537{
3538 const struct ta_firmware_header_v2_0 *ta_hdr;
3539 struct amdgpu_device *adev = psp->adev;
3540 int err = 0;
3541 int ta_index = 0;
3542
3543 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3544
3545 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3546 return -EINVAL;
3547
3548 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3549 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3550 return -EINVAL;
3551 }
3552
3553 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3554 err = parse_ta_bin_descriptor(psp,
3555 &ta_hdr->ta_fw_bin[ta_index],
3556 ta_hdr);
3557 if (err)
3558 return err;
3559 }
3560
3561 return 0;
3562}
3563
3564int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3565{
3566 const struct common_firmware_header *hdr;
3567 struct amdgpu_device *adev = psp->adev;
3568 char fw_name[PSP_FW_NAME_LEN];
3569 int err;
3570
3571 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3572 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3573 if (err)
3574 return err;
3575
3576 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3577 switch (le16_to_cpu(hdr->header_version_major)) {
3578 case 1:
3579 err = parse_ta_v1_microcode(psp);
3580 break;
3581 case 2:
3582 err = parse_ta_v2_microcode(psp);
3583 break;
3584 default:
3585 dev_err(adev->dev, "unsupported TA header version\n");
3586 err = -EINVAL;
3587 }
3588
3589 if (err)
3590 amdgpu_ucode_release(&adev->psp.ta_fw);
3591
3592 return err;
3593}
3594
3595int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3596{
3597 struct amdgpu_device *adev = psp->adev;
3598 char fw_name[PSP_FW_NAME_LEN];
3599 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3600 struct amdgpu_firmware_info *info = NULL;
3601 int err = 0;
3602
3603 if (!amdgpu_sriov_vf(adev)) {
3604 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3605 return -EINVAL;
3606 }
3607
3608 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3609 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3610 if (err) {
3611 if (err == -ENODEV) {
3612 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3613 err = 0;
3614 goto out;
3615 }
3616 dev_err(adev->dev, "fail to initialize cap microcode\n");
3617 }
3618
3619 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3620 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3621 info->fw = adev->psp.cap_fw;
3622 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3623 adev->psp.cap_fw->data;
3624 adev->firmware.fw_size += ALIGN(
3625 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3626 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3627 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3628 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3629
3630 return 0;
3631
3632out:
3633 amdgpu_ucode_release(&adev->psp.cap_fw);
3634 return err;
3635}
3636
3637static int psp_set_clockgating_state(void *handle,
3638 enum amd_clockgating_state state)
3639{
3640 return 0;
3641}
3642
3643static int psp_set_powergating_state(void *handle,
3644 enum amd_powergating_state state)
3645{
3646 return 0;
3647}
3648
3649static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3650 struct device_attribute *attr,
3651 char *buf)
3652{
3653 struct drm_device *ddev = dev_get_drvdata(dev);
3654 struct amdgpu_device *adev = drm_to_adev(ddev);
3655 uint32_t fw_ver;
3656 int ret;
3657
3658 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
3660 return -EBUSY;
3661 }
3662
3663 mutex_lock(&adev->psp.mutex);
3664 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3665 mutex_unlock(&adev->psp.mutex);
3666
3667 if (ret) {
3668 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3669 return ret;
3670 }
3671
3672 return sysfs_emit(buf, "%x\n", fw_ver);
3673}
3674
3675static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3676 struct device_attribute *attr,
3677 const char *buf,
3678 size_t count)
3679{
3680 struct drm_device *ddev = dev_get_drvdata(dev);
3681 struct amdgpu_device *adev = drm_to_adev(ddev);
3682 int ret, idx;
3683 char fw_name[100];
3684 const struct firmware *usbc_pd_fw;
3685 struct amdgpu_bo *fw_buf_bo = NULL;
3686 uint64_t fw_pri_mc_addr;
3687 void *fw_pri_cpu_addr;
3688
3689 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3690 dev_err(adev->dev, "PSP block is not ready yet.");
3691 return -EBUSY;
3692 }
3693
3694 if (!drm_dev_enter(ddev, &idx))
3695 return -ENODEV;
3696
3697 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3698 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3699 if (ret)
3700 goto fail;
3701
	/* LFB address must be aligned to a 1MB boundary, as required by the PSP */
3703 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3704 AMDGPU_GEM_DOMAIN_VRAM |
3705 AMDGPU_GEM_DOMAIN_GTT,
3706 &fw_buf_bo, &fw_pri_mc_addr,
3707 &fw_pri_cpu_addr);
3708 if (ret)
3709 goto rel_buf;
3710
3711 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3712
3713 mutex_lock(&adev->psp.mutex);
3714 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3715 mutex_unlock(&adev->psp.mutex);
3716
3717 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3718
3719rel_buf:
3720 release_firmware(usbc_pd_fw);
3721fail:
3722 if (ret) {
3723 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
3724 count = ret;
3725 }
3726
3727 drm_dev_exit(idx);
3728 return count;
3729}
3730
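/* Copy a firmware binary into the PSP's private staging buffer. The
 * drm_dev_enter/exit pair guards against touching the buffer after the
 * device has been unplugged.
 */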
3731void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3732{
3733 int idx;
3734
3735 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3736 return;
3737
3738 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3739 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3740
3741 drm_dev_exit(idx);
3742}
3743
3744/**
3745 * DOC: usbc_pd_fw
3746 * Reading from this file retrieves the version of the currently flashed USB-C PD firmware.
3747 * Writing a firmware file name (resolved by the firmware loader under amdgpu/) triggers the update process; see the example below.
3748 */
3749static DEVICE_ATTR(usbc_pd_fw, 0644,
3750 psp_usbc_pd_fw_sysfs_read,
3751 psp_usbc_pd_fw_sysfs_write);
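/*
 * Illustrative userspace usage of the usbc_pd_fw attribute (a sketch only,
 * with the usual fcntl.h/unistd.h includes: the sysfs path depends on how the
 * card is enumerated and the firmware file name below is hypothetical; the
 * written name is looked up by the firmware loader relative to amdgpu/):
 *
 *	char ver[16] = {0};
 *	int fd = open("/sys/class/drm/card0/device/usbc_pd_fw", O_RDWR);
 *
 *	read(fd, ver, sizeof(ver) - 1);		// current PD firmware version (hex)
 *	lseek(fd, 0, SEEK_SET);
 *	write(fd, "usbc_pd.bin", 11);		// update from amdgpu/usbc_pd.bin
 *	close(fd);
 */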
3752
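/* A firmware binary descriptor is considered valid only if it carries a
 * non-zero payload size.
 */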
3753int is_psp_fw_valid(struct psp_bin_desc bin)
3754{
3755 return bin.size_bytes;
3756}
3757
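/* sysfs write handler for psp_vbflash: accumulate the IFWI image chunks
 * written by userspace into a kvmalloc'd staging buffer. The actual flash is
 * only triggered by a subsequent read of the same attribute.
 */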
3758static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3759 struct bin_attribute *bin_attr,
3760 char *buffer, loff_t pos, size_t count)
3761{
3762 struct device *dev = kobj_to_dev(kobj);
3763 struct drm_device *ddev = dev_get_drvdata(dev);
3764 struct amdgpu_device *adev = drm_to_adev(ddev);
3765
3766 adev->psp.vbflash_done = false;
3767
3768 /* Safeguard against overflowing the fixed-size staging buffer */
3769 if (pos + count > AMD_VBIOS_FILE_MAX_SIZE_B) {
3770 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3771 kvfree(adev->psp.vbflash_tmp_buf);
3772 adev->psp.vbflash_tmp_buf = NULL;
3773 adev->psp.vbflash_image_size = 0;
3774 return -ENOMEM;
3775 }
3776
3777 /* TODO Just allocate max for now and optimize to realloc later if needed */
3778 if (!adev->psp.vbflash_tmp_buf) {
3779 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3780 if (!adev->psp.vbflash_tmp_buf)
3781 return -ENOMEM;
3782 }
3783
3784 mutex_lock(&adev->psp.mutex);
3785 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3786 adev->psp.vbflash_image_size += count;
3787 mutex_unlock(&adev->psp.mutex);
3788
3789 dev_dbg(adev->dev, "IFWI staged for update\n");
3790
3791 return count;
3792}
3793
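/* sysfs read handler for psp_vbflash: copy the staged IFWI image into a
 * GPU-visible buffer, ask the PSP to update the SPI ROM, and release the
 * staging buffer whether or not the update succeeded.
 */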
3794static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3795 struct bin_attribute *bin_attr, char *buffer,
3796 loff_t pos, size_t count)
3797{
3798 struct device *dev = kobj_to_dev(kobj);
3799 struct drm_device *ddev = dev_get_drvdata(dev);
3800 struct amdgpu_device *adev = drm_to_adev(ddev);
3801 struct amdgpu_bo *fw_buf_bo = NULL;
3802 uint64_t fw_pri_mc_addr;
3803 void *fw_pri_cpu_addr;
3804 int ret;
3805
3806 if (adev->psp.vbflash_image_size == 0)
3807 return -EINVAL;
3808
3809 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3810
3811 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3812 AMDGPU_GPU_PAGE_SIZE,
3813 AMDGPU_GEM_DOMAIN_VRAM,
3814 &fw_buf_bo,
3815 &fw_pri_mc_addr,
3816 &fw_pri_cpu_addr);
3817 if (ret)
3818 goto rel_buf;
3819
3820 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3821
3822 mutex_lock(&adev->psp.mutex);
3823 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3824 mutex_unlock(&adev->psp.mutex);
3825
3826 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3827
3828rel_buf:
3829 kvfree(adev->psp.vbflash_tmp_buf);
3830 adev->psp.vbflash_tmp_buf = NULL;
3831 adev->psp.vbflash_image_size = 0;
3832
3833 if (ret) {
3834 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3835 return ret;
3836 }
3837
3838 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3839 return 0;
3840}
3841
3842/**
3843 * DOC: psp_vbflash
3844 * Writing to this file will stage an IFWI for update. Reading from this file
3845 * will trigger the update process.
3846 */
3847static struct bin_attribute psp_vbflash_bin_attr = {
3848 .attr = {.name = "psp_vbflash", .mode = 0660},
3849 .size = 0,
3850 .write = amdgpu_psp_vbflash_write,
3851 .read = amdgpu_psp_vbflash_read,
3852};
3853
3854/**
3855 * DOC: psp_vbflash_status
3856 * The status of the flash process.
3857 * 0: IFWI flash not complete.
3858 * 1: IFWI flash complete.
3859 */
3860static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3861 struct device_attribute *attr,
3862 char *buf)
3863{
3864 struct drm_device *ddev = dev_get_drvdata(dev);
3865 struct amdgpu_device *adev = drm_to_adev(ddev);
3866 uint32_t vbflash_status;
3867
3868 vbflash_status = psp_vbflash_status(&adev->psp);
3869 if (!adev->psp.vbflash_done)
3870 vbflash_status = 0;
3871 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3872 vbflash_status = 1;
3873
3874 return sysfs_emit(buf, "0x%x\n", vbflash_status);
3875}
3876static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
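/*
 * Illustrative IFWI flashing sequence from userspace (a sketch only: the
 * sysfs paths are assumptions, "image" is a caller-provided buffer holding a
 * hypothetical IFWI binary, and error handling is omitted):
 *
 *	char status[8] = {0}, dummy;
 *	int fd = open("/sys/class/drm/card0/device/psp_vbflash", O_RDWR);
 *
 *	write(fd, image, image_size);	// stage the IFWI (may be split into chunks)
 *	read(fd, &dummy, 1);		// reading triggers the SPI ROM update
 *	close(fd);
 *
 *	fd = open("/sys/class/drm/card0/device/psp_vbflash_status", O_RDONLY);
 *	read(fd, status, sizeof(status) - 1);	// reports "0x1" once the flash completed
 *	close(fd);
 */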
3877
3878static struct bin_attribute *bin_flash_attrs[] = {
3879 &psp_vbflash_bin_attr,
3880 NULL
3881};
3882
3883static struct attribute *flash_attrs[] = {
3884 &dev_attr_psp_vbflash_status.attr,
3885 &dev_attr_usbc_pd_fw.attr,
3886 NULL
3887};
3888
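/* Only expose the flash-related attributes that the PSP actually supports:
 * usbc_pd_fw requires PD firmware update support (sup_pd_fw_up), while
 * psp_vbflash and psp_vbflash_status require IFWI update support
 * (sup_ifwi_up).
 */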
3889static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3890{
3891 struct device *dev = kobj_to_dev(kobj);
3892 struct drm_device *ddev = dev_get_drvdata(dev);
3893 struct amdgpu_device *adev = drm_to_adev(ddev);
3894
3895 if (attr == &dev_attr_usbc_pd_fw.attr)
3896 return adev->psp.sup_pd_fw_up ? 0660 : 0;
3897
3898 return adev->psp.sup_ifwi_up ? 0440 : 0;
3899}
3900
3901static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3902 struct bin_attribute *attr,
3903 int idx)
3904{
3905 struct device *dev = kobj_to_dev(kobj);
3906 struct drm_device *ddev = dev_get_drvdata(dev);
3907 struct amdgpu_device *adev = drm_to_adev(ddev);
3908
3909 return adev->psp.sup_ifwi_up ? 0660 : 0;
3910}
3911
3912const struct attribute_group amdgpu_flash_attr_group = {
3913 .attrs = flash_attrs,
3914 .bin_attrs = bin_flash_attrs,
3915 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3916 .is_visible = amdgpu_flash_attr_is_visible,
3917};
3918
3919const struct amd_ip_funcs psp_ip_funcs = {
3920 .name = "psp",
3921 .early_init = psp_early_init,
3922 .late_init = NULL,
3923 .sw_init = psp_sw_init,
3924 .sw_fini = psp_sw_fini,
3925 .hw_init = psp_hw_init,
3926 .hw_fini = psp_hw_fini,
3927 .suspend = psp_suspend,
3928 .resume = psp_resume,
3929 .is_idle = NULL,
3930 .check_soft_reset = NULL,
3931 .wait_for_idle = NULL,
3932 .soft_reset = NULL,
3933 .set_clockgating_state = psp_set_clockgating_state,
3934 .set_powergating_state = psp_set_powergating_state,
3935};
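/* One amdgpu_ip_block_version per supported PSP hardware generation; they all
 * share the common psp_ip_funcs implementation above.
 */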
3936
3937const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3938 .type = AMD_IP_BLOCK_TYPE_PSP,
3939 .major = 3,
3940 .minor = 1,
3941 .rev = 0,
3942 .funcs = &psp_ip_funcs,
3943};
3944
3945const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3946 .type = AMD_IP_BLOCK_TYPE_PSP,
3947 .major = 10,
3948 .minor = 0,
3949 .rev = 0,
3950 .funcs = &psp_ip_funcs,
3951};
3952
3953const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3954 .type = AMD_IP_BLOCK_TYPE_PSP,
3955 .major = 11,
3956 .minor = 0,
3957 .rev = 0,
3958 .funcs = &psp_ip_funcs,
3959};
3960
3961const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3962 .type = AMD_IP_BLOCK_TYPE_PSP,
3963 .major = 11,
3964 .minor = 0,
3965 .rev = 8,
3966 .funcs = &psp_ip_funcs,
3967};
3968
3969const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3970 .type = AMD_IP_BLOCK_TYPE_PSP,
3971 .major = 12,
3972 .minor = 0,
3973 .rev = 0,
3974 .funcs = &psp_ip_funcs,
3975};
3976
3977const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3978 .type = AMD_IP_BLOCK_TYPE_PSP,
3979 .major = 13,
3980 .minor = 0,
3981 .rev = 0,
3982 .funcs = &psp_ip_funcs,
3983};
3984
3985const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3986 .type = AMD_IP_BLOCK_TYPE_PSP,
3987 .major = 13,
3988 .minor = 0,
3989 .rev = 4,
3990 .funcs = &psp_ip_funcs,
3991};
3992
3993const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
3994 .type = AMD_IP_BLOCK_TYPE_PSP,
3995 .major = 14,
3996 .minor = 0,
3997 .rev = 0,
3998 .funcs = &psp_ip_funcs,
3999};
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122}
123
124static int psp_init_sriov_microcode(struct psp_context *psp)
125{
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
149 ret = psp_init_cap_microcode(psp, ucode_prefix);
150 ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 default:
157 return -EINVAL;
158 }
159 return ret;
160}
161
162static int psp_early_init(struct amdgpu_ip_block *ip_block)
163{
164 struct amdgpu_device *adev = ip_block->adev;
165 struct psp_context *psp = &adev->psp;
166
167 psp->autoload_supported = true;
168 psp->boot_time_tmr = true;
169
170 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 case IP_VERSION(9, 0, 0):
172 psp_v3_1_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 psp->boot_time_tmr = false;
175 break;
176 case IP_VERSION(10, 0, 0):
177 case IP_VERSION(10, 0, 1):
178 psp_v10_0_set_psp_funcs(psp);
179 psp->autoload_supported = false;
180 psp->boot_time_tmr = false;
181 break;
182 case IP_VERSION(11, 0, 2):
183 case IP_VERSION(11, 0, 4):
184 psp_v11_0_set_psp_funcs(psp);
185 psp->autoload_supported = false;
186 psp->boot_time_tmr = false;
187 break;
188 case IP_VERSION(11, 0, 0):
189 case IP_VERSION(11, 0, 7):
190 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 fallthrough;
192 case IP_VERSION(11, 0, 5):
193 case IP_VERSION(11, 0, 9):
194 case IP_VERSION(11, 0, 11):
195 case IP_VERSION(11, 5, 0):
196 case IP_VERSION(11, 0, 12):
197 case IP_VERSION(11, 0, 13):
198 psp_v11_0_set_psp_funcs(psp);
199 psp->boot_time_tmr = false;
200 break;
201 case IP_VERSION(11, 0, 3):
202 case IP_VERSION(12, 0, 1):
203 psp_v12_0_set_psp_funcs(psp);
204 psp->autoload_supported = false;
205 psp->boot_time_tmr = false;
206 break;
207 case IP_VERSION(13, 0, 2):
208 psp->boot_time_tmr = false;
209 fallthrough;
210 case IP_VERSION(13, 0, 6):
211 case IP_VERSION(13, 0, 14):
212 psp_v13_0_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 break;
215 case IP_VERSION(13, 0, 1):
216 case IP_VERSION(13, 0, 3):
217 case IP_VERSION(13, 0, 5):
218 case IP_VERSION(13, 0, 8):
219 case IP_VERSION(13, 0, 11):
220 case IP_VERSION(14, 0, 0):
221 case IP_VERSION(14, 0, 1):
222 case IP_VERSION(14, 0, 4):
223 psp_v13_0_set_psp_funcs(psp);
224 psp->boot_time_tmr = false;
225 break;
226 case IP_VERSION(11, 0, 8):
227 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
228 psp_v11_0_8_set_psp_funcs(psp);
229 }
230 psp->autoload_supported = false;
231 psp->boot_time_tmr = false;
232 break;
233 case IP_VERSION(13, 0, 0):
234 case IP_VERSION(13, 0, 7):
235 case IP_VERSION(13, 0, 10):
236 psp_v13_0_set_psp_funcs(psp);
237 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
238 psp->boot_time_tmr = false;
239 break;
240 case IP_VERSION(13, 0, 4):
241 psp_v13_0_4_set_psp_funcs(psp);
242 psp->boot_time_tmr = false;
243 break;
244 case IP_VERSION(14, 0, 2):
245 case IP_VERSION(14, 0, 3):
246 psp_v14_0_set_psp_funcs(psp);
247 break;
248 default:
249 return -EINVAL;
250 }
251
252 psp->adev = adev;
253
254 adev->psp_timeout = 20000;
255
256 psp_check_pmfw_centralized_cstate_management(psp);
257
258 if (amdgpu_sriov_vf(adev))
259 return psp_init_sriov_microcode(psp);
260 else
261 return psp_init_microcode(psp);
262}
263
264void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
265{
266 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
267 &mem_ctx->shared_buf);
268 mem_ctx->shared_bo = NULL;
269}
270
271static void psp_free_shared_bufs(struct psp_context *psp)
272{
273 void *tmr_buf;
274 void **pptr;
275
276 /* free TMR memory buffer */
277 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
278 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
279 psp->tmr_bo = NULL;
280
281 /* free xgmi shared memory */
282 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
283
284 /* free ras shared memory */
285 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
286
287 /* free hdcp shared memory */
288 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
289
290 /* free dtm shared memory */
291 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
292
293 /* free rap shared memory */
294 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
295
296 /* free securedisplay shared memory */
297 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
298
299
300}
301
302static void psp_memory_training_fini(struct psp_context *psp)
303{
304 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
305
306 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
307 kfree(ctx->sys_cache);
308 ctx->sys_cache = NULL;
309}
310
311static int psp_memory_training_init(struct psp_context *psp)
312{
313 int ret;
314 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
315
316 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
317 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
318 return 0;
319 }
320
321 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
322 if (ctx->sys_cache == NULL) {
323 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
324 ret = -ENOMEM;
325 goto Err_out;
326 }
327
328 dev_dbg(psp->adev->dev,
329 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
330 ctx->train_data_size,
331 ctx->p2c_train_data_offset,
332 ctx->c2p_train_data_offset);
333 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
334 return 0;
335
336Err_out:
337 psp_memory_training_fini(psp);
338 return ret;
339}
340
341/*
342 * Helper funciton to query psp runtime database entry
343 *
344 * @adev: amdgpu_device pointer
345 * @entry_type: the type of psp runtime database entry
346 * @db_entry: runtime database entry pointer
347 *
348 * Return false if runtime database doesn't exit or entry is invalid
349 * or true if the specific database entry is found, and copy to @db_entry
350 */
351static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
352 enum psp_runtime_entry_type entry_type,
353 void *db_entry)
354{
355 uint64_t db_header_pos, db_dir_pos;
356 struct psp_runtime_data_header db_header = {0};
357 struct psp_runtime_data_directory db_dir = {0};
358 bool ret = false;
359 int i;
360
361 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
362 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
363 return false;
364
365 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
366 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
367
368 /* read runtime db header from vram */
369 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
370 sizeof(struct psp_runtime_data_header), false);
371
372 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
373 /* runtime db doesn't exist, exit */
374 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
375 return false;
376 }
377
378 /* read runtime database entry from vram */
379 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
380 sizeof(struct psp_runtime_data_directory), false);
381
382 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
383 /* invalid db entry count, exit */
384 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
385 return false;
386 }
387
388 /* look up for requested entry type */
389 for (i = 0; i < db_dir.entry_count && !ret; i++) {
390 if (db_dir.entry_list[i].entry_type == entry_type) {
391 switch (entry_type) {
392 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
393 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
394 /* invalid db entry size */
395 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
396 return false;
397 }
398 /* read runtime database entry */
399 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
400 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
401 ret = true;
402 break;
403 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
404 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
405 /* invalid db entry size */
406 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
407 return false;
408 }
409 /* read runtime database entry */
410 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
411 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
412 ret = true;
413 break;
414 default:
415 ret = false;
416 break;
417 }
418 }
419 }
420
421 return ret;
422}
423
424static int psp_sw_init(struct amdgpu_ip_block *ip_block)
425{
426 struct amdgpu_device *adev = ip_block->adev;
427 struct psp_context *psp = &adev->psp;
428 int ret;
429 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
430 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
431 struct psp_runtime_scpm_entry scpm_entry;
432
433 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
434 if (!psp->cmd) {
435 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
436 ret = -ENOMEM;
437 }
438
439 adev->psp.xgmi_context.supports_extended_data =
440 !adev->gmc.xgmi.connected_to_cpu &&
441 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
442
443 memset(&scpm_entry, 0, sizeof(scpm_entry));
444 if ((psp_get_runtime_db_entry(adev,
445 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
446 &scpm_entry)) &&
447 (scpm_entry.scpm_status != SCPM_DISABLE)) {
448 adev->scpm_enabled = true;
449 adev->scpm_status = scpm_entry.scpm_status;
450 } else {
451 adev->scpm_enabled = false;
452 adev->scpm_status = SCPM_DISABLE;
453 }
454
455 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
456
457 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
458 if (psp_get_runtime_db_entry(adev,
459 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
460 &boot_cfg_entry)) {
461 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
462 if ((psp->boot_cfg_bitmask) &
463 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
464 /* If psp runtime database exists, then
465 * only enable two stage memory training
466 * when TWO_STAGE_DRAM_TRAINING bit is set
467 * in runtime database
468 */
469 mem_training_ctx->enable_mem_training = true;
470 }
471
472 } else {
473 /* If psp runtime database doesn't exist or is
474 * invalid, force enable two stage memory training
475 */
476 mem_training_ctx->enable_mem_training = true;
477 }
478
479 if (mem_training_ctx->enable_mem_training) {
480 ret = psp_memory_training_init(psp);
481 if (ret) {
482 dev_err(adev->dev, "Failed to initialize memory training!\n");
483 return ret;
484 }
485
486 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
487 if (ret) {
488 dev_err(adev->dev, "Failed to process memory training!\n");
489 return ret;
490 }
491 }
492
493 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
494 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
495 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
496 &psp->fw_pri_bo,
497 &psp->fw_pri_mc_addr,
498 &psp->fw_pri_buf);
499 if (ret)
500 return ret;
501
502 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
503 AMDGPU_GEM_DOMAIN_VRAM |
504 AMDGPU_GEM_DOMAIN_GTT,
505 &psp->fence_buf_bo,
506 &psp->fence_buf_mc_addr,
507 &psp->fence_buf);
508 if (ret)
509 goto failed1;
510
511 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
512 AMDGPU_GEM_DOMAIN_VRAM |
513 AMDGPU_GEM_DOMAIN_GTT,
514 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
515 (void **)&psp->cmd_buf_mem);
516 if (ret)
517 goto failed2;
518
519 return 0;
520
521failed2:
522 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
523 &psp->fence_buf_mc_addr, &psp->fence_buf);
524failed1:
525 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
526 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
527 return ret;
528}
529
530static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
531{
532 struct amdgpu_device *adev = ip_block->adev;
533 struct psp_context *psp = &adev->psp;
534 struct psp_gfx_cmd_resp *cmd = psp->cmd;
535
536 psp_memory_training_fini(psp);
537
538 amdgpu_ucode_release(&psp->sos_fw);
539 amdgpu_ucode_release(&psp->asd_fw);
540 amdgpu_ucode_release(&psp->ta_fw);
541 amdgpu_ucode_release(&psp->cap_fw);
542 amdgpu_ucode_release(&psp->toc_fw);
543
544 kfree(cmd);
545 cmd = NULL;
546
547 psp_free_shared_bufs(psp);
548
549 if (psp->km_ring.ring_mem)
550 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
551 &psp->km_ring.ring_mem_mc_addr,
552 (void **)&psp->km_ring.ring_mem);
553
554 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
555 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
556 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
557 &psp->fence_buf_mc_addr, &psp->fence_buf);
558 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
559 (void **)&psp->cmd_buf_mem);
560
561 return 0;
562}
563
564int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
565 uint32_t reg_val, uint32_t mask, bool check_changed)
566{
567 uint32_t val;
568 int i;
569 struct amdgpu_device *adev = psp->adev;
570
571 if (psp->adev->no_hw_access)
572 return 0;
573
574 for (i = 0; i < adev->usec_timeout; i++) {
575 val = RREG32(reg_index);
576 if (check_changed) {
577 if (val != reg_val)
578 return 0;
579 } else {
580 if ((val & mask) == reg_val)
581 return 0;
582 }
583 udelay(1);
584 }
585
586 return -ETIME;
587}
588
589int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
590 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
591{
592 uint32_t val;
593 int i;
594 struct amdgpu_device *adev = psp->adev;
595
596 if (psp->adev->no_hw_access)
597 return 0;
598
599 for (i = 0; i < msec_timeout; i++) {
600 val = RREG32(reg_index);
601 if ((val & mask) == reg_val)
602 return 0;
603 msleep(1);
604 }
605
606 return -ETIME;
607}
608
609static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
610{
611 switch (cmd_id) {
612 case GFX_CMD_ID_LOAD_TA:
613 return "LOAD_TA";
614 case GFX_CMD_ID_UNLOAD_TA:
615 return "UNLOAD_TA";
616 case GFX_CMD_ID_INVOKE_CMD:
617 return "INVOKE_CMD";
618 case GFX_CMD_ID_LOAD_ASD:
619 return "LOAD_ASD";
620 case GFX_CMD_ID_SETUP_TMR:
621 return "SETUP_TMR";
622 case GFX_CMD_ID_LOAD_IP_FW:
623 return "LOAD_IP_FW";
624 case GFX_CMD_ID_DESTROY_TMR:
625 return "DESTROY_TMR";
626 case GFX_CMD_ID_SAVE_RESTORE:
627 return "SAVE_RESTORE_IP_FW";
628 case GFX_CMD_ID_SETUP_VMR:
629 return "SETUP_VMR";
630 case GFX_CMD_ID_DESTROY_VMR:
631 return "DESTROY_VMR";
632 case GFX_CMD_ID_PROG_REG:
633 return "PROG_REG";
634 case GFX_CMD_ID_GET_FW_ATTESTATION:
635 return "GET_FW_ATTESTATION";
636 case GFX_CMD_ID_LOAD_TOC:
637 return "ID_LOAD_TOC";
638 case GFX_CMD_ID_AUTOLOAD_RLC:
639 return "AUTOLOAD_RLC";
640 case GFX_CMD_ID_BOOT_CFG:
641 return "BOOT_CFG";
642 case GFX_CMD_ID_CONFIG_SQ_PERFMON:
643 return "CONFIG_SQ_PERFMON";
644 default:
645 return "UNKNOWN CMD";
646 }
647}
648
649static bool psp_err_warn(struct psp_context *psp)
650{
651 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
652
653 /* This response indicates reg list is already loaded */
654 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
655 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
656 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
657 cmd->resp.status == TEE_ERROR_CANCEL)
658 return false;
659
660 return true;
661}
662
663static int
664psp_cmd_submit_buf(struct psp_context *psp,
665 struct amdgpu_firmware_info *ucode,
666 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
667{
668 int ret;
669 int index;
670 int timeout = psp->adev->psp_timeout;
671 bool ras_intr = false;
672 bool skip_unsupport = false;
673
674 if (psp->adev->no_hw_access)
675 return 0;
676
677 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
678
679 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
680
681 index = atomic_inc_return(&psp->fence_value);
682 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
683 if (ret) {
684 atomic_dec(&psp->fence_value);
685 goto exit;
686 }
687
688 amdgpu_device_invalidate_hdp(psp->adev, NULL);
689 while (*((unsigned int *)psp->fence_buf) != index) {
690 if (--timeout == 0)
691 break;
692 /*
693 * Shouldn't wait for timeout when err_event_athub occurs,
694 * because gpu reset thread triggered and lock resource should
695 * be released for psp resume sequence.
696 */
697 ras_intr = amdgpu_ras_intr_triggered();
698 if (ras_intr)
699 break;
700 usleep_range(10, 100);
701 amdgpu_device_invalidate_hdp(psp->adev, NULL);
702 }
703
704 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
705 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
706 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
707
708 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
709
710 /* In some cases, psp response status is not 0 even there is no
711 * problem while the command is submitted. Some version of PSP FW
712 * doesn't write 0 to that field.
713 * So here we would like to only print a warning instead of an error
714 * during psp initialization to avoid breaking hw_init and it doesn't
715 * return -EINVAL.
716 */
717 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
718 if (ucode)
719 dev_warn(psp->adev->dev,
720 "failed to load ucode %s(0x%X) ",
721 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
722 if (psp_err_warn(psp))
723 dev_warn(
724 psp->adev->dev,
725 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
726 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
727 psp->cmd_buf_mem->cmd_id,
728 psp->cmd_buf_mem->resp.status);
729 /* If any firmware (including CAP) load fails under SRIOV, it should
730 * return failure to stop the VF from initializing.
731 * Also return failure in case of timeout
732 */
733 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
734 ret = -EINVAL;
735 goto exit;
736 }
737 }
738
739 if (ucode) {
740 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
741 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
742 }
743
744exit:
745 return ret;
746}
747
748static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
749{
750 struct psp_gfx_cmd_resp *cmd = psp->cmd;
751
752 mutex_lock(&psp->mutex);
753
754 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
755
756 return cmd;
757}
758
759static void release_psp_cmd_buf(struct psp_context *psp)
760{
761 mutex_unlock(&psp->mutex);
762}
763
764static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
765 struct psp_gfx_cmd_resp *cmd,
766 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
767{
768 struct amdgpu_device *adev = psp->adev;
769 uint32_t size = 0;
770 uint64_t tmr_pa = 0;
771
772 if (tmr_bo) {
773 size = amdgpu_bo_size(tmr_bo);
774 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
775 }
776
777 if (amdgpu_sriov_vf(psp->adev))
778 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
779 else
780 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
781 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
782 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
783 cmd->cmd.cmd_setup_tmr.buf_size = size;
784 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
785 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
786 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
787}
788
789static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
790 uint64_t pri_buf_mc, uint32_t size)
791{
792 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
793 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
794 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
795 cmd->cmd.cmd_load_toc.toc_size = size;
796}
797
798/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
799static int psp_load_toc(struct psp_context *psp,
800 uint32_t *tmr_size)
801{
802 int ret;
803 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
804
805 /* Copy toc to psp firmware private buffer */
806 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
807
808 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
809
810 ret = psp_cmd_submit_buf(psp, NULL, cmd,
811 psp->fence_buf_mc_addr);
812 if (!ret)
813 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
814
815 release_psp_cmd_buf(psp);
816
817 return ret;
818}
819
820/* Set up Trusted Memory Region */
821static int psp_tmr_init(struct psp_context *psp)
822{
823 int ret = 0;
824 int tmr_size;
825 void *tmr_buf;
826 void **pptr;
827
828 /*
829 * According to HW engineer, they prefer the TMR address be "naturally
830 * aligned" , e.g. the start address be an integer divide of TMR size.
831 *
832 * Note: this memory need be reserved till the driver
833 * uninitializes.
834 */
835 tmr_size = PSP_TMR_SIZE(psp->adev);
836
837 /* For ASICs support RLC autoload, psp will parse the toc
838 * and calculate the total size of TMR needed
839 */
840 if (!amdgpu_sriov_vf(psp->adev) &&
841 psp->toc.start_addr &&
842 psp->toc.size_bytes &&
843 psp->fw_pri_buf) {
844 ret = psp_load_toc(psp, &tmr_size);
845 if (ret) {
846 dev_err(psp->adev->dev, "Failed to load toc\n");
847 return ret;
848 }
849 }
850
851 if (!psp->tmr_bo && !psp->boot_time_tmr) {
852 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
853 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
854 PSP_TMR_ALIGNMENT,
855 AMDGPU_HAS_VRAM(psp->adev) ?
856 AMDGPU_GEM_DOMAIN_VRAM :
857 AMDGPU_GEM_DOMAIN_GTT,
858 &psp->tmr_bo, &psp->tmr_mc_addr,
859 pptr);
860 }
861
862 return ret;
863}
864
865static bool psp_skip_tmr(struct psp_context *psp)
866{
867 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
868 case IP_VERSION(11, 0, 9):
869 case IP_VERSION(11, 0, 7):
870 case IP_VERSION(13, 0, 2):
871 case IP_VERSION(13, 0, 6):
872 case IP_VERSION(13, 0, 10):
873 case IP_VERSION(13, 0, 14):
874 return true;
875 default:
876 return false;
877 }
878}
879
880static int psp_tmr_load(struct psp_context *psp)
881{
882 int ret;
883 struct psp_gfx_cmd_resp *cmd;
884
885 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
886 * Already set up by host driver.
887 */
888 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
889 return 0;
890
891 cmd = acquire_psp_cmd_buf(psp);
892
893 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
894 if (psp->tmr_bo)
895 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
896 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
897
898 ret = psp_cmd_submit_buf(psp, NULL, cmd,
899 psp->fence_buf_mc_addr);
900
901 release_psp_cmd_buf(psp);
902
903 return ret;
904}
905
906static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
907 struct psp_gfx_cmd_resp *cmd)
908{
909 if (amdgpu_sriov_vf(psp->adev))
910 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
911 else
912 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
913}
914
915static int psp_tmr_unload(struct psp_context *psp)
916{
917 int ret;
918 struct psp_gfx_cmd_resp *cmd;
919
920 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
921 * as TMR is not loaded at all
922 */
923 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
924 return 0;
925
926 cmd = acquire_psp_cmd_buf(psp);
927
928 psp_prep_tmr_unload_cmd_buf(psp, cmd);
929 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
930
931 ret = psp_cmd_submit_buf(psp, NULL, cmd,
932 psp->fence_buf_mc_addr);
933
934 release_psp_cmd_buf(psp);
935
936 return ret;
937}
938
939static int psp_tmr_terminate(struct psp_context *psp)
940{
941 return psp_tmr_unload(psp);
942}
943
944int psp_get_fw_attestation_records_addr(struct psp_context *psp,
945 uint64_t *output_ptr)
946{
947 int ret;
948 struct psp_gfx_cmd_resp *cmd;
949
950 if (!output_ptr)
951 return -EINVAL;
952
953 if (amdgpu_sriov_vf(psp->adev))
954 return 0;
955
956 cmd = acquire_psp_cmd_buf(psp);
957
958 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
959
960 ret = psp_cmd_submit_buf(psp, NULL, cmd,
961 psp->fence_buf_mc_addr);
962
963 if (!ret) {
964 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
965 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
966 }
967
968 release_psp_cmd_buf(psp);
969
970 return ret;
971}
972
973static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
974{
975 struct psp_context *psp = &adev->psp;
976 struct psp_gfx_cmd_resp *cmd;
977 int ret;
978
979 if (amdgpu_sriov_vf(adev))
980 return 0;
981
982 cmd = acquire_psp_cmd_buf(psp);
983
984 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
985 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
986
987 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
988 if (!ret) {
989 *boot_cfg =
990 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
991 }
992
993 release_psp_cmd_buf(psp);
994
995 return ret;
996}
997
998static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
999{
1000 int ret;
1001 struct psp_context *psp = &adev->psp;
1002 struct psp_gfx_cmd_resp *cmd;
1003
1004 if (amdgpu_sriov_vf(adev))
1005 return 0;
1006
1007 cmd = acquire_psp_cmd_buf(psp);
1008
1009 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1010 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1011 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1012 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1013
1014 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1015
1016 release_psp_cmd_buf(psp);
1017
1018 return ret;
1019}
1020
1021static int psp_rl_load(struct amdgpu_device *adev)
1022{
1023 int ret;
1024 struct psp_context *psp = &adev->psp;
1025 struct psp_gfx_cmd_resp *cmd;
1026
1027 if (!is_psp_fw_valid(psp->rl))
1028 return 0;
1029
1030 cmd = acquire_psp_cmd_buf(psp);
1031
1032 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1033 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1034
1035 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1036 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1037 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1038 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1039 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1040
1041 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1042
1043 release_psp_cmd_buf(psp);
1044
1045 return ret;
1046}
1047
1048int psp_memory_partition(struct psp_context *psp, int mode)
1049{
1050 struct psp_gfx_cmd_resp *cmd;
1051 int ret;
1052
1053 if (amdgpu_sriov_vf(psp->adev))
1054 return 0;
1055
1056 cmd = acquire_psp_cmd_buf(psp);
1057
1058 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
1059 cmd->cmd.cmd_memory_part.mode = mode;
1060
1061 dev_info(psp->adev->dev,
1062 "Requesting %d memory partition change through PSP", mode);
1063 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1064 if (ret)
1065 dev_err(psp->adev->dev,
1066 "PSP request failed to change to NPS%d mode\n", mode);
1067
1068 release_psp_cmd_buf(psp);
1069
1070 return ret;
1071}
1072
1073int psp_spatial_partition(struct psp_context *psp, int mode)
1074{
1075 struct psp_gfx_cmd_resp *cmd;
1076 int ret;
1077
1078 if (amdgpu_sriov_vf(psp->adev))
1079 return 0;
1080
1081 cmd = acquire_psp_cmd_buf(psp);
1082
1083 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1084 cmd->cmd.cmd_spatial_part.mode = mode;
1085
1086 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1087 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1088
1089 release_psp_cmd_buf(psp);
1090
1091 return ret;
1092}
1093
1094static int psp_asd_initialize(struct psp_context *psp)
1095{
1096 int ret;
1097
1098 /* If PSP version doesn't match ASD version, asd loading will be failed.
1099 * add workaround to bypass it for sriov now.
1100 * TODO: add version check to make it common
1101 */
1102 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1103 return 0;
1104
1105 /* bypass asd if display hardware is not available */
1106 if (!amdgpu_device_has_display_hardware(psp->adev) &&
1107 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1108 return 0;
1109
1110 psp->asd_context.mem_context.shared_mc_addr = 0;
1111 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1112 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1113
1114 ret = psp_ta_load(psp, &psp->asd_context);
1115 if (!ret)
1116 psp->asd_context.initialized = true;
1117
1118 return ret;
1119}
1120
1121static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1122 uint32_t session_id)
1123{
1124 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1125 cmd->cmd.cmd_unload_ta.session_id = session_id;
1126}
1127
1128int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1129{
1130 int ret;
1131 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1132
1133 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1134
1135 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1136
1137 context->resp_status = cmd->resp.status;
1138
1139 release_psp_cmd_buf(psp);
1140
1141 return ret;
1142}
1143
1144static int psp_asd_terminate(struct psp_context *psp)
1145{
1146 int ret;
1147
1148 if (amdgpu_sriov_vf(psp->adev))
1149 return 0;
1150
1151 if (!psp->asd_context.initialized)
1152 return 0;
1153
1154 ret = psp_ta_unload(psp, &psp->asd_context);
1155 if (!ret)
1156 psp->asd_context.initialized = false;
1157
1158 return ret;
1159}
1160
1161static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1162 uint32_t id, uint32_t value)
1163{
1164 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1165 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1166 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1167}
1168
1169int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1170 uint32_t value)
1171{
1172 struct psp_gfx_cmd_resp *cmd;
1173 int ret = 0;
1174
1175 if (reg >= PSP_REG_LAST)
1176 return -EINVAL;
1177
1178 cmd = acquire_psp_cmd_buf(psp);
1179
1180 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1181 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1182 if (ret)
1183 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1184
1185 release_psp_cmd_buf(psp);
1186
1187 return ret;
1188}
1189
1190static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1191 uint64_t ta_bin_mc,
1192 struct ta_context *context)
1193{
1194 cmd->cmd_id = context->ta_load_type;
1195 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1196 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1197 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1198
1199 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1200 lower_32_bits(context->mem_context.shared_mc_addr);
1201 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1202 upper_32_bits(context->mem_context.shared_mc_addr);
1203 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1204}
1205
1206int psp_ta_init_shared_buf(struct psp_context *psp,
1207 struct ta_mem_context *mem_ctx)
1208{
1209 /*
1210 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1211 * physical) for ta to host memory
1212 */
1213 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1214 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1215 AMDGPU_GEM_DOMAIN_GTT,
1216 &mem_ctx->shared_bo,
1217 &mem_ctx->shared_mc_addr,
1218 &mem_ctx->shared_buf);
1219}
1220
1221static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1222 uint32_t ta_cmd_id,
1223 uint32_t session_id)
1224{
1225 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1226 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1227 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1228}
1229
1230int psp_ta_invoke(struct psp_context *psp,
1231 uint32_t ta_cmd_id,
1232 struct ta_context *context)
1233{
1234 int ret;
1235 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1236
1237 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1238
1239 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1240 psp->fence_buf_mc_addr);
1241
1242 context->resp_status = cmd->resp.status;
1243
1244 release_psp_cmd_buf(psp);
1245
1246 return ret;
1247}
1248
1249int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1250{
1251 int ret;
1252 struct psp_gfx_cmd_resp *cmd;
1253
1254 cmd = acquire_psp_cmd_buf(psp);
1255
1256 psp_copy_fw(psp, context->bin_desc.start_addr,
1257 context->bin_desc.size_bytes);
1258
1259 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1260
1261 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1262 psp->fence_buf_mc_addr);
1263
1264 context->resp_status = cmd->resp.status;
1265
1266 if (!ret)
1267 context->session_id = cmd->resp.session_id;
1268
1269 release_psp_cmd_buf(psp);
1270
1271 return ret;
1272}
1273
1274int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1275{
1276 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1277}
1278
1279int psp_xgmi_terminate(struct psp_context *psp)
1280{
1281 int ret;
1282 struct amdgpu_device *adev = psp->adev;
1283
1284 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1285 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1286 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1287 adev->gmc.xgmi.connected_to_cpu))
1288 return 0;
1289
1290 if (!psp->xgmi_context.context.initialized)
1291 return 0;
1292
1293 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1294
1295 psp->xgmi_context.context.initialized = false;
1296
1297 return ret;
1298}
1299
1300int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1301{
1302 struct ta_xgmi_shared_memory *xgmi_cmd;
1303 int ret;
1304
1305 if (!psp->ta_fw ||
1306 !psp->xgmi_context.context.bin_desc.size_bytes ||
1307 !psp->xgmi_context.context.bin_desc.start_addr)
1308 return -ENOENT;
1309
1310 if (!load_ta)
1311 goto invoke;
1312
1313 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1314 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1315
1316 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1317 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1318 if (ret)
1319 return ret;
1320 }
1321
1322 /* Load XGMI TA */
1323 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1324 if (!ret)
1325 psp->xgmi_context.context.initialized = true;
1326 else
1327 return ret;
1328
1329invoke:
1330 /* Initialize XGMI session */
1331 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1332 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1333 xgmi_cmd->flag_extend_link_record = set_extended_data;
1334 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1335
1336 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1337 /* note down the capbility flag for XGMI TA */
1338 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1339
1340 return ret;
1341}
1342
1343int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1344{
1345 struct ta_xgmi_shared_memory *xgmi_cmd;
1346 int ret;
1347
1348 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1349 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1350
1351 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1352
1353 /* Invoke xgmi ta to get hive id */
1354 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1355 if (ret)
1356 return ret;
1357
1358 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1359
1360 return 0;
1361}
1362
1363int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1364{
1365 struct ta_xgmi_shared_memory *xgmi_cmd;
1366 int ret;
1367
1368 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1369 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1370
1371 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1372
1373 /* Invoke xgmi ta to get the node id */
1374 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1375 if (ret)
1376 return ret;
1377
1378 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1379
1380 return 0;
1381}
1382
1383static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1384{
1385 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1386 IP_VERSION(13, 0, 2) &&
1387 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1388 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1389 IP_VERSION(13, 0, 6);
1390}
1391
1392/*
1393 * Chips that support extended topology information require the driver to
1394 * reflect topology information in the opposite direction. This is
1395 * because the TA has already exceeded its link record limit and if the
1396 * TA holds bi-directional information, the driver would have to do
1397 * multiple fetches instead of just two.
1398 */
1399static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1400 struct psp_xgmi_node_info node_info)
1401{
1402 struct amdgpu_device *mirror_adev;
1403 struct amdgpu_hive_info *hive;
1404 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1405 uint64_t dst_node_id = node_info.node_id;
1406 uint8_t dst_num_hops = node_info.num_hops;
1407 uint8_t dst_num_links = node_info.num_links;
1408
1409 hive = amdgpu_get_xgmi_hive(psp->adev);
1410 if (WARN_ON(!hive))
1411 return;
1412
1413 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1414 struct psp_xgmi_topology_info *mirror_top_info;
1415 int j;
1416
1417 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1418 continue;
1419
1420 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1421 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1422 if (mirror_top_info->nodes[j].node_id != src_node_id)
1423 continue;
1424
1425 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1426 /*
1427 * prevent 0 num_links value re-reflection since reflection
1428 * criteria is based on num_hops (direct or indirect).
1429 *
1430 */
1431 if (dst_num_links)
1432 mirror_top_info->nodes[j].num_links = dst_num_links;
1433
1434 break;
1435 }
1436
1437 break;
1438 }
1439
1440 amdgpu_put_xgmi_hive(hive);
1441}
1442
1443int psp_xgmi_get_topology_info(struct psp_context *psp,
1444 int number_devices,
1445 struct psp_xgmi_topology_info *topology,
1446 bool get_extended_data)
1447{
1448 struct ta_xgmi_shared_memory *xgmi_cmd;
1449 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1450 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1451 int i;
1452 int ret;
1453
1454 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1455 return -EINVAL;
1456
1457 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1458 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1459 xgmi_cmd->flag_extend_link_record = get_extended_data;
1460
1461 /* Fill in the shared memory with topology information as input */
1462 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1463 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1464 topology_info_input->num_nodes = number_devices;
1465
1466 for (i = 0; i < topology_info_input->num_nodes; i++) {
1467 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1468 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1469 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1470 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1471 }
1472
1473 /* Invoke xgmi ta to get the topology information */
1474 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1475 if (ret)
1476 return ret;
1477
1478 /* Read the output topology information from the shared memory */
1479 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1480 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1481 for (i = 0; i < topology->num_nodes; i++) {
1482 /* extended data will either be 0 or equal to non-extended data */
1483 if (topology_info_output->nodes[i].num_hops)
1484 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1485
1486 /* non-extended data gets everything here so no need to update */
1487 if (!get_extended_data) {
1488 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1489 topology->nodes[i].is_sharing_enabled =
1490 topology_info_output->nodes[i].is_sharing_enabled;
1491 topology->nodes[i].sdma_engine =
1492 topology_info_output->nodes[i].sdma_engine;
1493 }
1494
1495 }
1496
1497 /* Invoke xgmi ta again to get the link information */
1498 if (psp_xgmi_peer_link_info_supported(psp)) {
1499 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1500 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1501 bool requires_reflection =
1502 (psp->xgmi_context.supports_extended_data &&
1503 get_extended_data) ||
1504 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1505 IP_VERSION(13, 0, 6) ||
1506 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1507 IP_VERSION(13, 0, 14);
1508 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1509 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1510
1511 /* popluate the shared output buffer rather than the cmd input buffer
1512 * with node_ids as the input for GET_PEER_LINKS command execution.
1513 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1514 * The same requirement for GET_EXTEND_PEER_LINKS command.
1515 */
1516 if (ta_port_num_support) {
1517 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1518
1519 for (i = 0; i < topology->num_nodes; i++)
1520 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1521
1522 link_extend_info_output->num_nodes = topology->num_nodes;
1523 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1524 } else {
1525 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1526
1527 for (i = 0; i < topology->num_nodes; i++)
1528 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1529
1530 link_info_output->num_nodes = topology->num_nodes;
1531 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1532 }
1533
1534 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1535 if (ret)
1536 return ret;
1537
1538 for (i = 0; i < topology->num_nodes; i++) {
1539 uint8_t node_num_links = ta_port_num_support ?
1540 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1541 /* accumulate num_links on extended data */
1542 if (get_extended_data) {
1543 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1544 } else {
1545 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1546 topology->nodes[i].num_links : node_num_links;
1547 }
1548 /* popluate the connected port num info if supported and available */
1549 if (ta_port_num_support && topology->nodes[i].num_links) {
1550 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1551 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1552 }
1553
1554 /* reflect the topology information for bi-directionality */
1555 if (requires_reflection && topology->nodes[i].num_hops)
1556 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1557 }
1558 }
1559
1560 return 0;
1561}
1562
1563int psp_xgmi_set_topology_info(struct psp_context *psp,
1564 int number_devices,
1565 struct psp_xgmi_topology_info *topology)
1566{
1567 struct ta_xgmi_shared_memory *xgmi_cmd;
1568 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1569 int i;
1570
1571 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1572 return -EINVAL;
1573
1574 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1575 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1576
1577 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1578 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1579 topology_info_input->num_nodes = number_devices;
1580
1581 for (i = 0; i < topology_info_input->num_nodes; i++) {
1582 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1583 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1584 topology_info_input->nodes[i].is_sharing_enabled = 1;
1585 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1586 }
1587
1588 /* Invoke xgmi ta to set topology information */
1589 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1590}
1591
1592// ras begin
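/*
 * Translate the RAS TA status left in the shared buffer after a command
 * into driver warnings; unrecognized codes are reported as raw values.
 */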
1593static void psp_ras_ta_check_status(struct psp_context *psp)
1594{
1595 struct ta_ras_shared_memory *ras_cmd =
1596 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1597
1598 switch (ras_cmd->ras_status) {
1599 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1600 dev_warn(psp->adev->dev,
1601 "RAS WARNING: cmd failed due to unsupported ip\n");
1602 break;
1603 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1604 dev_warn(psp->adev->dev,
1605 "RAS WARNING: cmd failed due to unsupported error injection\n");
1606 break;
1607 case TA_RAS_STATUS__SUCCESS:
1608 break;
1609 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1610 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1611 dev_warn(psp->adev->dev,
1612 "RAS WARNING: Inject error to critical region is not allowed\n");
1613 break;
1614 default:
1615 dev_warn(psp->adev->dev,
1616 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1617 break;
1618 }
1619}
1620
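/*
 * Serialize access to the RAS TA shared buffer, copy the command-specific
 * input (ENABLE/DISABLE_FEATURES, TRIGGER_ERROR or QUERY_ADDRESS) into it,
 * invoke the TA, and on success copy the relevant output back to the caller.
 */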
1621static int psp_ras_send_cmd(struct psp_context *psp,
1622 enum ras_command cmd_id, void *in, void *out)
1623{
1624 struct ta_ras_shared_memory *ras_cmd;
1625 uint32_t cmd = cmd_id;
1626 int ret = 0;
1627
1628 if (!in)
1629 return -EINVAL;
1630
1631 mutex_lock(&psp->ras_context.mutex);
1632 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1633 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1634
1635 switch (cmd) {
1636 case TA_RAS_COMMAND__ENABLE_FEATURES:
1637 case TA_RAS_COMMAND__DISABLE_FEATURES:
1638 memcpy(&ras_cmd->ras_in_message,
1639 in, sizeof(ras_cmd->ras_in_message));
1640 break;
1641 case TA_RAS_COMMAND__TRIGGER_ERROR:
1642 memcpy(&ras_cmd->ras_in_message.trigger_error,
1643 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1644 break;
1645 case TA_RAS_COMMAND__QUERY_ADDRESS:
1646 memcpy(&ras_cmd->ras_in_message.address,
1647 in, sizeof(ras_cmd->ras_in_message.address));
1648 break;
1649 default:
1650 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1651 ret = -EINVAL;
1652 goto err_out;
1653 }
1654
1655 ras_cmd->cmd_id = cmd;
1656 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1657
1658 switch (cmd) {
1659 case TA_RAS_COMMAND__TRIGGER_ERROR:
1660 if (!ret && out)
1661 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1662 break;
1663 case TA_RAS_COMMAND__QUERY_ADDRESS:
1664 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1665 ret = -EINVAL;
1666 else if (out)
1667 memcpy(out,
1668 &ras_cmd->ras_out_message.address,
1669 sizeof(ras_cmd->ras_out_message.address));
1670 break;
1671 default:
1672 break;
1673 }
1674
1675err_out:
1676 mutex_unlock(&psp->ras_context.mutex);
1677
1678 return ret;
1679}
1680
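/*
 * Invoke a RAS TA command and post-process the result: reject an
 * unsupported TA interface version, mark RAS as unavailable when the TA
 * reports the ECC switch as disabled, warn when internal register access
 * is blocked, and log the TA status.
 */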
1681int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1682{
1683 struct ta_ras_shared_memory *ras_cmd;
1684 int ret;
1685
1686 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1687
1688 /*
1689 * TODO: bypass the loading in sriov for now
1690 */
1691 if (amdgpu_sriov_vf(psp->adev))
1692 return 0;
1693
1694 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1695
1696 if (amdgpu_ras_intr_triggered())
1697 return ret;
1698
1699 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1700 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1701 return -EINVAL;
1702 }
1703
1704 if (!ret) {
1705 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1706 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1707
1708 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1709 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1710 dev_warn(psp->adev->dev,
1711 "RAS internal register access blocked\n");
1712
1713 psp_ras_ta_check_status(psp);
1714 }
1715
1716 return ret;
1717}
1718
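/*
 * Enable or disable RAS features through the RAS TA by sending
 * TA_RAS_COMMAND__ENABLE_FEATURES or __DISABLE_FEATURES with the
 * caller-provided feature description.
 */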
1719int psp_ras_enable_features(struct psp_context *psp,
1720 union ta_ras_cmd_input *info, bool enable)
1721{
1722 enum ras_command cmd_id;
1723 int ret;
1724
1725 if (!psp->ras_context.context.initialized || !info)
1726 return -EINVAL;
1727
1728 cmd_id = enable ?
1729 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1730 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1731 if (ret)
1732 return -EINVAL;
1733
1734 return 0;
1735}
1736
1737int psp_ras_terminate(struct psp_context *psp)
1738{
1739 int ret;
1740
1741 /*
1742 * TODO: bypass the terminate in sriov for now
1743 */
1744 if (amdgpu_sriov_vf(psp->adev))
1745 return 0;
1746
1747 if (!psp->ras_context.context.initialized)
1748 return 0;
1749
1750 ret = psp_ta_unload(psp, &psp->ras_context.context);
1751
1752 psp->ras_context.context.initialized = false;
1753
1754 mutex_destroy(&psp->ras_context.mutex);
1755
1756 return ret;
1757}
1758
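/*
 * Load the optional RAS TA. When dynamic boot config is supported, keep
 * the GECC boot setting consistent with the UMC RAS support state, then
 * fill the TA init flags (poison mode, dGPU mode, XCC mask, disabled
 * channel count, NPS mode) before loading the TA.
 */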
1759int psp_ras_initialize(struct psp_context *psp)
1760{
1761 int ret;
1762 uint32_t boot_cfg = 0xFF;
1763 struct amdgpu_device *adev = psp->adev;
1764 struct ta_ras_shared_memory *ras_cmd;
1765
1766 /*
1767 * TODO: bypass the initialize in sriov for now
1768 */
1769 if (amdgpu_sriov_vf(adev))
1770 return 0;
1771
1772 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1773 !adev->psp.ras_context.context.bin_desc.start_addr) {
1774 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1775 return 0;
1776 }
1777
1778 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1779 /* query GECC enablement status from boot config
1780 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1781 */
1782 ret = psp_boot_config_get(adev, &boot_cfg);
1783 if (ret)
1784 dev_warn(adev->dev, "PSP get boot config failed\n");
1785
1786 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1787 if (!boot_cfg) {
1788 dev_info(adev->dev, "GECC is disabled\n");
1789 } else {
1790 /* disable GECC in the next boot cycle if ras is
1791 * disabled by the module parameters amdgpu_ras_enable
1792 * and/or amdgpu_ras_mask, or if the boot_config_get
1793 * call failed
1794 */
1795 ret = psp_boot_config_set(adev, 0);
1796 if (ret)
1797 dev_warn(adev->dev, "PSP set boot config failed\n");
1798 else
1799 dev_warn(adev->dev, "GECC will be disabled in the next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
1800 }
1801 } else {
1802 if (boot_cfg == 1) {
1803 dev_info(adev->dev, "GECC is enabled\n");
1804 } else {
1805 /* enable GECC in the next boot cycle if it is disabled
1806 * in the boot config, or force-enable GECC if the boot
1807 * configuration could not be retrieved
1808 */
1809 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1810 if (ret)
1811 dev_warn(adev->dev, "PSP set boot config failed\n");
1812 else
1813 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1814 }
1815 }
1816 }
1817
1818 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1819 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1820
1821 if (!psp->ras_context.context.mem_context.shared_buf) {
1822 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1823 if (ret)
1824 return ret;
1825 }
1826
1827 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1828 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1829
1830 if (amdgpu_ras_is_poison_mode_supported(adev))
1831 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1832 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1833 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1834 ras_cmd->ras_in_message.init_flags.xcc_mask =
1835 adev->gfx.xcc_mask;
1836 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1837 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
1838 ras_cmd->ras_in_message.init_flags.nps_mode =
1839 adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1840
1841 ret = psp_ta_load(psp, &psp->ras_context.context);
1842
1843 if (!ret && !ras_cmd->ras_status) {
1844 psp->ras_context.context.initialized = true;
1845 mutex_init(&psp->ras_context.mutex);
1846 } else {
1847 if (ras_cmd->ras_status)
1848 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1849
1850 /* fail to load RAS TA */
1851 psp->ras_context.context.initialized = false;
1852 }
1853
1854 return ret;
1855}
1856
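/*
 * Inject a RAS error through the RAS TA: map instance_mask to the hardware
 * instances of the target block, fold the result into sub_block_index for
 * backward compatibility and send TRIGGER_ERROR. A pending err_event_athub
 * interrupt means the injection succeeded even though the TA status can no
 * longer be trusted.
 */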
1857int psp_ras_trigger_error(struct psp_context *psp,
1858 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1859{
1860 struct amdgpu_device *adev = psp->adev;
1861 int ret;
1862 uint32_t dev_mask;
1863 uint32_t ras_status = 0;
1864
1865 if (!psp->ras_context.context.initialized || !info)
1866 return -EINVAL;
1867
1868 switch (info->block_id) {
1869 case TA_RAS_BLOCK__GFX:
1870 dev_mask = GET_MASK(GC, instance_mask);
1871 break;
1872 case TA_RAS_BLOCK__SDMA:
1873 dev_mask = GET_MASK(SDMA0, instance_mask);
1874 break;
1875 case TA_RAS_BLOCK__VCN:
1876 case TA_RAS_BLOCK__JPEG:
1877 dev_mask = GET_MASK(VCN, instance_mask);
1878 break;
1879 default:
1880 dev_mask = instance_mask;
1881 break;
1882 }
1883
1884 /* reuse sub_block_index for backward compatibility */
1885 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1886 dev_mask &= AMDGPU_RAS_INST_MASK;
1887 info->sub_block_index |= dev_mask;
1888
1889 ret = psp_ras_send_cmd(psp,
1890 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1891 if (ret)
1892 return -EINVAL;
1893
1894 /* If err_event_athub occurs, the error injection was successful;
1895 * however, the return status from the TA is no longer reliable.
1896 */
1897 if (amdgpu_ras_intr_triggered())
1898 return 0;
1899
1900 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1901 return -EACCES;
1902 else if (ras_status)
1903 return -EINVAL;
1904
1905 return 0;
1906}
1907
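/*
 * Resolve an error address through the RAS TA QUERY_ADDRESS command; both
 * the input and output structures must be supplied by the caller.
 */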
1908int psp_ras_query_address(struct psp_context *psp,
1909 struct ta_ras_query_address_input *addr_in,
1910 struct ta_ras_query_address_output *addr_out)
1911{
1912 int ret;
1913
1914 if (!psp->ras_context.context.initialized ||
1915 !addr_in || !addr_out)
1916 return -EINVAL;
1917
1918 ret = psp_ras_send_cmd(psp,
1919 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1920
1921 return ret;
1922}
1923// ras end
1924
1925// HDCP start
1926static int psp_hdcp_initialize(struct psp_context *psp)
1927{
1928 int ret;
1929
1930 /*
1931 * TODO: bypass the initialize in sriov for now
1932 */
1933 if (amdgpu_sriov_vf(psp->adev))
1934 return 0;
1935
1936 /* bypass hdcp initialization if dmu is harvested */
1937 if (!amdgpu_device_has_display_hardware(psp->adev))
1938 return 0;
1939
1940 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1941 !psp->hdcp_context.context.bin_desc.start_addr) {
1942 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1943 return 0;
1944 }
1945
1946 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1947 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1948
1949 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1950 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1951 if (ret)
1952 return ret;
1953 }
1954
1955 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1956 if (!ret) {
1957 psp->hdcp_context.context.initialized = true;
1958 mutex_init(&psp->hdcp_context.mutex);
1959 }
1960
1961 return ret;
1962}
1963
1964int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1965{
1966 /*
1967 * TODO: bypass the loading in sriov for now
1968 */
1969 if (amdgpu_sriov_vf(psp->adev))
1970 return 0;
1971
1972 if (!psp->hdcp_context.context.initialized)
1973 return 0;
1974
1975 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1976}
1977
1978static int psp_hdcp_terminate(struct psp_context *psp)
1979{
1980 int ret;
1981
1982 /*
1983 * TODO: bypass the terminate in sriov for now
1984 */
1985 if (amdgpu_sriov_vf(psp->adev))
1986 return 0;
1987
1988 if (!psp->hdcp_context.context.initialized)
1989 return 0;
1990
1991 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1992
1993 psp->hdcp_context.context.initialized = false;
1994
1995 return ret;
1996}
1997// HDCP end
1998
1999// DTM start
2000static int psp_dtm_initialize(struct psp_context *psp)
2001{
2002 int ret;
2003
2004 /*
2005 * TODO: bypass the initialize in sriov for now
2006 */
2007 if (amdgpu_sriov_vf(psp->adev))
2008 return 0;
2009
2010 /* bypass dtm initialization if dmu is harvested */
2011 if (!amdgpu_device_has_display_hardware(psp->adev))
2012 return 0;
2013
2014 if (!psp->dtm_context.context.bin_desc.size_bytes ||
2015 !psp->dtm_context.context.bin_desc.start_addr) {
2016 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
2017 return 0;
2018 }
2019
2020 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
2021 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2022
2023 if (!psp->dtm_context.context.mem_context.shared_buf) {
2024 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
2025 if (ret)
2026 return ret;
2027 }
2028
2029 ret = psp_ta_load(psp, &psp->dtm_context.context);
2030 if (!ret) {
2031 psp->dtm_context.context.initialized = true;
2032 mutex_init(&psp->dtm_context.mutex);
2033 }
2034
2035 return ret;
2036}
2037
2038int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2039{
2040 /*
2041 * TODO: bypass the loading in sriov for now
2042 */
2043 if (amdgpu_sriov_vf(psp->adev))
2044 return 0;
2045
2046 if (!psp->dtm_context.context.initialized)
2047 return 0;
2048
2049 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2050}
2051
2052static int psp_dtm_terminate(struct psp_context *psp)
2053{
2054 int ret;
2055
2056 /*
2057 * TODO: bypass the terminate in sriov for now
2058 */
2059 if (amdgpu_sriov_vf(psp->adev))
2060 return 0;
2061
2062 if (!psp->dtm_context.context.initialized)
2063 return 0;
2064
2065 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2066
2067 psp->dtm_context.context.initialized = false;
2068
2069 return ret;
2070}
2071// DTM end
2072
2073// RAP start
2074static int psp_rap_initialize(struct psp_context *psp)
2075{
2076 int ret;
2077 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2078
2079 /*
2080 * TODO: bypass the initialize in sriov for now
2081 */
2082 if (amdgpu_sriov_vf(psp->adev))
2083 return 0;
2084
2085 if (!psp->rap_context.context.bin_desc.size_bytes ||
2086 !psp->rap_context.context.bin_desc.start_addr) {
2087 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2088 return 0;
2089 }
2090
2091 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2092 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2093
2094 if (!psp->rap_context.context.mem_context.shared_buf) {
2095 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2096 if (ret)
2097 return ret;
2098 }
2099
2100 ret = psp_ta_load(psp, &psp->rap_context.context);
2101 if (!ret) {
2102 psp->rap_context.context.initialized = true;
2103 mutex_init(&psp->rap_context.mutex);
2104 } else
2105 return ret;
2106
2107 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2108 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2109 psp_rap_terminate(psp);
2110 /* free rap shared memory */
2111 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2112
2113 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2114 ret, status);
2115
2116 return ret;
2117 }
2118
2119 return 0;
2120}
2121
2122static int psp_rap_terminate(struct psp_context *psp)
2123{
2124 int ret;
2125
2126 if (!psp->rap_context.context.initialized)
2127 return 0;
2128
2129 ret = psp_ta_unload(psp, &psp->rap_context.context);
2130
2131 psp->rap_context.context.initialized = false;
2132
2133 return ret;
2134}
2135
2136int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2137{
2138 struct ta_rap_shared_memory *rap_cmd;
2139 int ret = 0;
2140
2141 if (!psp->rap_context.context.initialized)
2142 return 0;
2143
2144 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2145 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2146 return -EINVAL;
2147
2148 mutex_lock(&psp->rap_context.mutex);
2149
2150 rap_cmd = (struct ta_rap_shared_memory *)
2151 psp->rap_context.context.mem_context.shared_buf;
2152 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2153
2154 rap_cmd->cmd_id = ta_cmd_id;
2155 rap_cmd->validation_method_id = METHOD_A;
2156
2157 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2158 if (ret)
2159 goto out_unlock;
2160
2161 if (status)
2162 *status = rap_cmd->rap_status;
2163
2164out_unlock:
2165 mutex_unlock(&psp->rap_context.mutex);
2166
2167 return ret;
2168}
2169// RAP end
2170
2171/* securedisplay start */
2172static int psp_securedisplay_initialize(struct psp_context *psp)
2173{
2174 int ret;
2175 struct ta_securedisplay_cmd *securedisplay_cmd;
2176
2177 /*
2178 * TODO: bypass the initialize in sriov for now
2179 */
2180 if (amdgpu_sriov_vf(psp->adev))
2181 return 0;
2182
2183 /* bypass securedisplay initialization if dmu is harvested */
2184 if (!amdgpu_device_has_display_hardware(psp->adev))
2185 return 0;
2186
2187 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2188 !psp->securedisplay_context.context.bin_desc.start_addr) {
2189 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2190 return 0;
2191 }
2192
2193 psp->securedisplay_context.context.mem_context.shared_mem_size =
2194 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2195 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2196
2197 if (!psp->securedisplay_context.context.initialized) {
2198 ret = psp_ta_init_shared_buf(psp,
2199 &psp->securedisplay_context.context.mem_context);
2200 if (ret)
2201 return ret;
2202 }
2203
2204 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2205 if (!ret) {
2206 psp->securedisplay_context.context.initialized = true;
2207 mutex_init(&psp->securedisplay_context.mutex);
2208 } else
2209 return ret;
2210
2211 mutex_lock(&psp->securedisplay_context.mutex);
2212
2213 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2214 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2215
2216 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2217
2218 mutex_unlock(&psp->securedisplay_context.mutex);
2219
2220 if (ret) {
2221 psp_securedisplay_terminate(psp);
2222 /* free securedisplay shared memory */
2223 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2224 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2225 return -EINVAL;
2226 }
2227
2228 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2229 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2230 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2231 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2232 /* don't try again */
2233 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2234 }
2235
2236 return 0;
2237}
2238
2239static int psp_securedisplay_terminate(struct psp_context *psp)
2240{
2241 int ret;
2242
2243 /*
2244 * TODO: bypass the terminate in sriov for now
2245 */
2246 if (amdgpu_sriov_vf(psp->adev))
2247 return 0;
2248
2249 if (!psp->securedisplay_context.context.initialized)
2250 return 0;
2251
2252 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2253
2254 psp->securedisplay_context.context.initialized = false;
2255
2256 return ret;
2257}
2258
2259int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2260{
2261 int ret;
2262
2263 if (!psp->securedisplay_context.context.initialized)
2264 return -EINVAL;
2265
2266 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2267 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2268 return -EINVAL;
2269
2270 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2271
2272 return ret;
2273}
2274/* SECUREDISPLAY end */
2275
2276int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2277{
2278 struct psp_context *psp = &adev->psp;
2279 int ret = 0;
2280
2281 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2282 ret = psp->funcs->wait_for_bootloader(psp);
2283
2284 return ret;
2285}
2286
2287bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2288{
2289 if (psp->funcs &&
2290 psp->funcs->get_ras_capability) {
2291 return psp->funcs->get_ras_capability(psp);
2292 } else {
2293 return false;
2294 }
2295}
2296
2297bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
2298{
2299 struct psp_context *psp = &adev->psp;
2300
2301 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2302 return false;
2303
2304 if (psp->funcs && psp->funcs->is_reload_needed)
2305 return psp->funcs->is_reload_needed(psp);
2306
2307 return false;
2308}
2309
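/*
 * Bring up the PSP: on bare metal, step through the bootloader stages
 * (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV, IPKEYMGR_DRV,
 * SOS) that are present and supported, create the KM ring, and set up the
 * TMR around the PMFW load according to boot_time_tmr/autoload support.
 */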
2310static int psp_hw_start(struct psp_context *psp)
2311{
2312 struct amdgpu_device *adev = psp->adev;
2313 int ret;
2314
2315 if (!amdgpu_sriov_vf(adev)) {
2316 if ((is_psp_fw_valid(psp->kdb)) &&
2317 (psp->funcs->bootloader_load_kdb != NULL)) {
2318 ret = psp_bootloader_load_kdb(psp);
2319 if (ret) {
2320 dev_err(adev->dev, "PSP load kdb failed!\n");
2321 return ret;
2322 }
2323 }
2324
2325 if ((is_psp_fw_valid(psp->spl)) &&
2326 (psp->funcs->bootloader_load_spl != NULL)) {
2327 ret = psp_bootloader_load_spl(psp);
2328 if (ret) {
2329 dev_err(adev->dev, "PSP load spl failed!\n");
2330 return ret;
2331 }
2332 }
2333
2334 if ((is_psp_fw_valid(psp->sys)) &&
2335 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2336 ret = psp_bootloader_load_sysdrv(psp);
2337 if (ret) {
2338 dev_err(adev->dev, "PSP load sys drv failed!\n");
2339 return ret;
2340 }
2341 }
2342
2343 if ((is_psp_fw_valid(psp->soc_drv)) &&
2344 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2345 ret = psp_bootloader_load_soc_drv(psp);
2346 if (ret) {
2347 dev_err(adev->dev, "PSP load soc drv failed!\n");
2348 return ret;
2349 }
2350 }
2351
2352 if ((is_psp_fw_valid(psp->intf_drv)) &&
2353 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2354 ret = psp_bootloader_load_intf_drv(psp);
2355 if (ret) {
2356 dev_err(adev->dev, "PSP load intf drv failed!\n");
2357 return ret;
2358 }
2359 }
2360
2361 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2362 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2363 ret = psp_bootloader_load_dbg_drv(psp);
2364 if (ret) {
2365 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2366 return ret;
2367 }
2368 }
2369
2370 if ((is_psp_fw_valid(psp->ras_drv)) &&
2371 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2372 ret = psp_bootloader_load_ras_drv(psp);
2373 if (ret) {
2374 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2375 return ret;
2376 }
2377 }
2378
2379 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2380 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2381 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2382 if (ret) {
2383 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2384 return ret;
2385 }
2386 }
2387
2388 if ((is_psp_fw_valid(psp->sos)) &&
2389 (psp->funcs->bootloader_load_sos != NULL)) {
2390 ret = psp_bootloader_load_sos(psp);
2391 if (ret) {
2392 dev_err(adev->dev, "PSP load sos failed!\n");
2393 return ret;
2394 }
2395 }
2396 }
2397
2398 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2399 if (ret) {
2400 dev_err(adev->dev, "PSP create ring failed!\n");
2401 return ret;
2402 }
2403
2404 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2405 goto skip_pin_bo;
2406
2407 if (!psp->boot_time_tmr || psp->autoload_supported) {
2408 ret = psp_tmr_init(psp);
2409 if (ret) {
2410 dev_err(adev->dev, "PSP tmr init failed!\n");
2411 return ret;
2412 }
2413 }
2414
2415skip_pin_bo:
2416 /*
2417 * For ASICs with DF Cstate management centralized
2418 * to PMFW, TMR setup should be performed after PMFW is
2419 * loaded and before other non-PSP firmware is loaded.
2420 */
2421 if (psp->pmfw_centralized_cstate_management) {
2422 ret = psp_load_smu_fw(psp);
2423 if (ret)
2424 return ret;
2425 }
2426
2427 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2428 ret = psp_tmr_load(psp);
2429 if (ret) {
2430 dev_err(adev->dev, "PSP load tmr failed!\n");
2431 return ret;
2432 }
2433 }
2434
2435 return 0;
2436}
2437
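/*
 * Map an AMDGPU_UCODE_ID_* firmware id to the GFX_FW_TYPE_* value used by
 * the LOAD_IP_FW command; unknown ids are rejected with -EINVAL.
 */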
2438static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2439 enum psp_gfx_fw_type *type)
2440{
2441 switch (ucode->ucode_id) {
2442 case AMDGPU_UCODE_ID_CAP:
2443 *type = GFX_FW_TYPE_CAP;
2444 break;
2445 case AMDGPU_UCODE_ID_SDMA0:
2446 *type = GFX_FW_TYPE_SDMA0;
2447 break;
2448 case AMDGPU_UCODE_ID_SDMA1:
2449 *type = GFX_FW_TYPE_SDMA1;
2450 break;
2451 case AMDGPU_UCODE_ID_SDMA2:
2452 *type = GFX_FW_TYPE_SDMA2;
2453 break;
2454 case AMDGPU_UCODE_ID_SDMA3:
2455 *type = GFX_FW_TYPE_SDMA3;
2456 break;
2457 case AMDGPU_UCODE_ID_SDMA4:
2458 *type = GFX_FW_TYPE_SDMA4;
2459 break;
2460 case AMDGPU_UCODE_ID_SDMA5:
2461 *type = GFX_FW_TYPE_SDMA5;
2462 break;
2463 case AMDGPU_UCODE_ID_SDMA6:
2464 *type = GFX_FW_TYPE_SDMA6;
2465 break;
2466 case AMDGPU_UCODE_ID_SDMA7:
2467 *type = GFX_FW_TYPE_SDMA7;
2468 break;
2469 case AMDGPU_UCODE_ID_CP_MES:
2470 *type = GFX_FW_TYPE_CP_MES;
2471 break;
2472 case AMDGPU_UCODE_ID_CP_MES_DATA:
2473 *type = GFX_FW_TYPE_MES_STACK;
2474 break;
2475 case AMDGPU_UCODE_ID_CP_MES1:
2476 *type = GFX_FW_TYPE_CP_MES_KIQ;
2477 break;
2478 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2479 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2480 break;
2481 case AMDGPU_UCODE_ID_CP_CE:
2482 *type = GFX_FW_TYPE_CP_CE;
2483 break;
2484 case AMDGPU_UCODE_ID_CP_PFP:
2485 *type = GFX_FW_TYPE_CP_PFP;
2486 break;
2487 case AMDGPU_UCODE_ID_CP_ME:
2488 *type = GFX_FW_TYPE_CP_ME;
2489 break;
2490 case AMDGPU_UCODE_ID_CP_MEC1:
2491 *type = GFX_FW_TYPE_CP_MEC;
2492 break;
2493 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2494 *type = GFX_FW_TYPE_CP_MEC_ME1;
2495 break;
2496 case AMDGPU_UCODE_ID_CP_MEC2:
2497 *type = GFX_FW_TYPE_CP_MEC;
2498 break;
2499 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2500 *type = GFX_FW_TYPE_CP_MEC_ME2;
2501 break;
2502 case AMDGPU_UCODE_ID_RLC_P:
2503 *type = GFX_FW_TYPE_RLC_P;
2504 break;
2505 case AMDGPU_UCODE_ID_RLC_V:
2506 *type = GFX_FW_TYPE_RLC_V;
2507 break;
2508 case AMDGPU_UCODE_ID_RLC_G:
2509 *type = GFX_FW_TYPE_RLC_G;
2510 break;
2511 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2512 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2513 break;
2514 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2515 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2516 break;
2517 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2518 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2519 break;
2520 case AMDGPU_UCODE_ID_RLC_IRAM:
2521 *type = GFX_FW_TYPE_RLC_IRAM;
2522 break;
2523 case AMDGPU_UCODE_ID_RLC_DRAM:
2524 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2525 break;
2526 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2527 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2528 break;
2529 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2530 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2531 break;
2532 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2533 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2534 break;
2535 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2536 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2537 break;
2538 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2539 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2540 break;
2541 case AMDGPU_UCODE_ID_SMC:
2542 *type = GFX_FW_TYPE_SMU;
2543 break;
2544 case AMDGPU_UCODE_ID_PPTABLE:
2545 *type = GFX_FW_TYPE_PPTABLE;
2546 break;
2547 case AMDGPU_UCODE_ID_UVD:
2548 *type = GFX_FW_TYPE_UVD;
2549 break;
2550 case AMDGPU_UCODE_ID_UVD1:
2551 *type = GFX_FW_TYPE_UVD1;
2552 break;
2553 case AMDGPU_UCODE_ID_VCE:
2554 *type = GFX_FW_TYPE_VCE;
2555 break;
2556 case AMDGPU_UCODE_ID_VCN:
2557 *type = GFX_FW_TYPE_VCN;
2558 break;
2559 case AMDGPU_UCODE_ID_VCN1:
2560 *type = GFX_FW_TYPE_VCN1;
2561 break;
2562 case AMDGPU_UCODE_ID_DMCU_ERAM:
2563 *type = GFX_FW_TYPE_DMCU_ERAM;
2564 break;
2565 case AMDGPU_UCODE_ID_DMCU_INTV:
2566 *type = GFX_FW_TYPE_DMCU_ISR;
2567 break;
2568 case AMDGPU_UCODE_ID_VCN0_RAM:
2569 *type = GFX_FW_TYPE_VCN0_RAM;
2570 break;
2571 case AMDGPU_UCODE_ID_VCN1_RAM:
2572 *type = GFX_FW_TYPE_VCN1_RAM;
2573 break;
2574 case AMDGPU_UCODE_ID_DMCUB:
2575 *type = GFX_FW_TYPE_DMUB;
2576 break;
2577 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2578 case AMDGPU_UCODE_ID_SDMA_RS64:
2579 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2580 break;
2581 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2582 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2583 break;
2584 case AMDGPU_UCODE_ID_IMU_I:
2585 *type = GFX_FW_TYPE_IMU_I;
2586 break;
2587 case AMDGPU_UCODE_ID_IMU_D:
2588 *type = GFX_FW_TYPE_IMU_D;
2589 break;
2590 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2591 *type = GFX_FW_TYPE_RS64_PFP;
2592 break;
2593 case AMDGPU_UCODE_ID_CP_RS64_ME:
2594 *type = GFX_FW_TYPE_RS64_ME;
2595 break;
2596 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2597 *type = GFX_FW_TYPE_RS64_MEC;
2598 break;
2599 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2600 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2601 break;
2602 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2603 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2604 break;
2605 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2606 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2607 break;
2608 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2609 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2610 break;
2611 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2612 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2613 break;
2614 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2615 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2616 break;
2617 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2618 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2619 break;
2620 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2621 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2622 break;
2623 case AMDGPU_UCODE_ID_VPE_CTX:
2624 *type = GFX_FW_TYPE_VPEC_FW1;
2625 break;
2626 case AMDGPU_UCODE_ID_VPE_CTL:
2627 *type = GFX_FW_TYPE_VPEC_FW2;
2628 break;
2629 case AMDGPU_UCODE_ID_VPE:
2630 *type = GFX_FW_TYPE_VPE;
2631 break;
2632 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2633 *type = GFX_FW_TYPE_UMSCH_UCODE;
2634 break;
2635 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2636 *type = GFX_FW_TYPE_UMSCH_DATA;
2637 break;
2638 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2639 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2640 break;
2641 case AMDGPU_UCODE_ID_P2S_TABLE:
2642 *type = GFX_FW_TYPE_P2S_TABLE;
2643 break;
2644 case AMDGPU_UCODE_ID_JPEG_RAM:
2645 *type = GFX_FW_TYPE_JPEG_RAM;
2646 break;
2647 case AMDGPU_UCODE_ID_ISP:
2648 *type = GFX_FW_TYPE_ISP;
2649 break;
2650 case AMDGPU_UCODE_ID_MAXIMUM:
2651 default:
2652 return -EINVAL;
2653 }
2654
2655 return 0;
2656}
2657
2658static void psp_print_fw_hdr(struct psp_context *psp,
2659 struct amdgpu_firmware_info *ucode)
2660{
2661 struct amdgpu_device *adev = psp->adev;
2662 struct common_firmware_header *hdr;
2663
2664 switch (ucode->ucode_id) {
2665 case AMDGPU_UCODE_ID_SDMA0:
2666 case AMDGPU_UCODE_ID_SDMA1:
2667 case AMDGPU_UCODE_ID_SDMA2:
2668 case AMDGPU_UCODE_ID_SDMA3:
2669 case AMDGPU_UCODE_ID_SDMA4:
2670 case AMDGPU_UCODE_ID_SDMA5:
2671 case AMDGPU_UCODE_ID_SDMA6:
2672 case AMDGPU_UCODE_ID_SDMA7:
2673 hdr = (struct common_firmware_header *)
2674 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2675 amdgpu_ucode_print_sdma_hdr(hdr);
2676 break;
2677 case AMDGPU_UCODE_ID_CP_CE:
2678 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2679 amdgpu_ucode_print_gfx_hdr(hdr);
2680 break;
2681 case AMDGPU_UCODE_ID_CP_PFP:
2682 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2683 amdgpu_ucode_print_gfx_hdr(hdr);
2684 break;
2685 case AMDGPU_UCODE_ID_CP_ME:
2686 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2687 amdgpu_ucode_print_gfx_hdr(hdr);
2688 break;
2689 case AMDGPU_UCODE_ID_CP_MEC1:
2690 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2691 amdgpu_ucode_print_gfx_hdr(hdr);
2692 break;
2693 case AMDGPU_UCODE_ID_RLC_G:
2694 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2695 amdgpu_ucode_print_rlc_hdr(hdr);
2696 break;
2697 case AMDGPU_UCODE_ID_SMC:
2698 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2699 amdgpu_ucode_print_smc_hdr(hdr);
2700 break;
2701 default:
2702 break;
2703 }
2704}
2705
2706static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2707 struct amdgpu_firmware_info *ucode,
2708 struct psp_gfx_cmd_resp *cmd)
2709{
2710 int ret;
2711 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2712
2713 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2714 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2715 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2716 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2717
2718 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2719 if (ret)
2720 dev_err(psp->adev->dev, "Unknown firmware type\n");
2721
2722 return ret;
2723}
2724
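/*
 * Load a single non-PSP firmware image: acquire the PSP command buffer,
 * build the LOAD_IP_FW command from the ucode address, size and type, and
 * submit it to the PSP ring.
 */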
2725int psp_execute_ip_fw_load(struct psp_context *psp,
2726 struct amdgpu_firmware_info *ucode)
2727{
2728 int ret = 0;
2729 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2730
2731 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2732 if (!ret) {
2733 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2734 psp->fence_buf_mc_addr);
2735 }
2736
2737 release_psp_cmd_buf(psp);
2738
2739 return ret;
2740}
2741
2742static int psp_load_p2s_table(struct psp_context *psp)
2743{
2744 int ret;
2745 struct amdgpu_device *adev = psp->adev;
2746 struct amdgpu_firmware_info *ucode =
2747 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2748
2749 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2750 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2751 return 0;
2752
2753 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2754 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2755 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2756 0x0036003C;
2757 if (psp->sos.fw_version < supp_vers)
2758 return 0;
2759 }
2760
2761 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2762 return 0;
2763
2764 ret = psp_execute_ip_fw_load(psp, ucode);
2765
2766 return ret;
2767}
2768
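/*
 * Load the SMU firmware through the PSP. Skipped when BACO/BAMACO runtime
 * PM keeps the SMU alive or under SR-IOV; on RAS-enabled resets of MP0
 * 11.0.2/11.0.4 the MP1 state is set to UNLOAD before the reload.
 */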
2769static int psp_load_smu_fw(struct psp_context *psp)
2770{
2771 int ret;
2772 struct amdgpu_device *adev = psp->adev;
2773 struct amdgpu_firmware_info *ucode =
2774 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2775 struct amdgpu_ras *ras = psp->ras_context.ras;
2776
2777 /*
2778 * Skip SMU FW reloading when only BACO is used for runtime PM,
2779 * as the SMU is always alive.
2780 */
2781 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2782 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2783 return 0;
2784
2785 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2786 return 0;
2787
2788 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2789 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2790 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2791 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2792 if (ret)
2793 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2794 }
2795
2796 ret = psp_execute_ip_fw_load(psp, ucode);
2797
2798 if (ret)
2799 dev_err(adev->dev, "PSP load smu failed!\n");
2800
2801 return ret;
2802}
2803
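/*
 * Decide whether a firmware image should be skipped by the generic loader:
 * empty images, the P2S table, SMU firmware that is loaded elsewhere,
 * images skipped under SR-IOV, and MEC JT images when autoload is enabled.
 */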
2804static bool fw_load_skip_check(struct psp_context *psp,
2805 struct amdgpu_firmware_info *ucode)
2806{
2807 if (!ucode->fw || !ucode->ucode_size)
2808 return true;
2809
2810 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2811 return true;
2812
2813 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2814 (psp_smu_reload_quirk(psp) ||
2815 psp->autoload_supported ||
2816 psp->pmfw_centralized_cstate_management))
2817 return true;
2818
2819 if (amdgpu_sriov_vf(psp->adev) &&
2820 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2821 return true;
2822
2823 if (psp->autoload_supported &&
2824 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2825 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2826 /* skip mec JT when autoload is enabled */
2827 return true;
2828
2829 return false;
2830}
2831
2832int psp_load_fw_list(struct psp_context *psp,
2833 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2834{
2835 int ret = 0, i;
2836 struct amdgpu_firmware_info *ucode;
2837
2838 for (i = 0; i < ucode_count; ++i) {
2839 ucode = ucode_list[i];
2840 psp_print_fw_hdr(psp, ucode);
2841 ret = psp_execute_ip_fw_load(psp, ucode);
2842 if (ret)
2843 return ret;
2844 }
2845 return ret;
2846}
2847
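/*
 * Load all non-PSP firmware images through the PSP: the SMU firmware and
 * P2S table first when applicable, then every ucode entry that is not
 * filtered by fw_load_skip_check, kicking off RLC autoload once the last
 * GFX image has been sent.
 */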
2848static int psp_load_non_psp_fw(struct psp_context *psp)
2849{
2850 int i, ret;
2851 struct amdgpu_firmware_info *ucode;
2852 struct amdgpu_device *adev = psp->adev;
2853
2854 if (psp->autoload_supported &&
2855 !psp->pmfw_centralized_cstate_management) {
2856 ret = psp_load_smu_fw(psp);
2857 if (ret)
2858 return ret;
2859 }
2860
2861 /* Load P2S table first if it's available */
2862 psp_load_p2s_table(psp);
2863
2864 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2865 ucode = &adev->firmware.ucode[i];
2866
2867 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2868 !fw_load_skip_check(psp, ucode)) {
2869 ret = psp_load_smu_fw(psp);
2870 if (ret)
2871 return ret;
2872 continue;
2873 }
2874
2875 if (fw_load_skip_check(psp, ucode))
2876 continue;
2877
2878 if (psp->autoload_supported &&
2879 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2880 IP_VERSION(11, 0, 7) ||
2881 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2882 IP_VERSION(11, 0, 11) ||
2883 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2884 IP_VERSION(11, 0, 12)) &&
2885 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2886 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2887 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2888 /* PSP only receives one SDMA fw for sienna_cichlid,
2889 * as all four sdma fw images are the same
2890 */
2891 continue;
2892
2893 psp_print_fw_hdr(psp, ucode);
2894
2895 ret = psp_execute_ip_fw_load(psp, ucode);
2896 if (ret)
2897 return ret;
2898
2899 /* Start rlc autoload after psp has received all the gfx firmware */
2900 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2901 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2902 ret = psp_rlc_autoload_start(psp);
2903 if (ret) {
2904 dev_err(adev->dev, "Failed to start rlc autoload\n");
2905 return ret;
2906 }
2907 }
2908 }
2909
2910 return 0;
2911}
2912
2913static int psp_load_fw(struct amdgpu_device *adev)
2914{
2915 int ret;
2916 struct psp_context *psp = &adev->psp;
2917
2918 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2919 /* should not destroy ring, only stop */
2920 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2921 } else {
2922 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2923
2924 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2925 if (ret) {
2926 dev_err(adev->dev, "PSP ring init failed!\n");
2927 goto failed;
2928 }
2929 }
2930
2931 ret = psp_hw_start(psp);
2932 if (ret)
2933 goto failed;
2934
2935 ret = psp_load_non_psp_fw(psp);
2936 if (ret)
2937 goto failed1;
2938
2939 ret = psp_asd_initialize(psp);
2940 if (ret) {
2941 dev_err(adev->dev, "PSP load asd failed!\n");
2942 goto failed1;
2943 }
2944
2945 ret = psp_rl_load(adev);
2946 if (ret) {
2947 dev_err(adev->dev, "PSP load RL failed!\n");
2948 goto failed1;
2949 }
2950
2951 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2952 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2953 ret = psp_xgmi_initialize(psp, false, true);
2954 /* Warn about an XGMI session initialization failure
2955 * instead of stopping driver initialization
2956 */
2957 if (ret)
2958 dev_err(psp->adev->dev,
2959 "XGMI: Failed to initialize XGMI session\n");
2960 }
2961 }
2962
2963 if (psp->ta_fw) {
2964 ret = psp_ras_initialize(psp);
2965 if (ret)
2966 dev_err(psp->adev->dev,
2967 "RAS: Failed to initialize RAS\n");
2968
2969 ret = psp_hdcp_initialize(psp);
2970 if (ret)
2971 dev_err(psp->adev->dev,
2972 "HDCP: Failed to initialize HDCP\n");
2973
2974 ret = psp_dtm_initialize(psp);
2975 if (ret)
2976 dev_err(psp->adev->dev,
2977 "DTM: Failed to initialize DTM\n");
2978
2979 ret = psp_rap_initialize(psp);
2980 if (ret)
2981 dev_err(psp->adev->dev,
2982 "RAP: Failed to initialize RAP\n");
2983
2984 ret = psp_securedisplay_initialize(psp);
2985 if (ret)
2986 dev_err(psp->adev->dev,
2987 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2988 }
2989
2990 return 0;
2991
2992failed1:
2993 psp_free_shared_bufs(psp);
2994failed:
2995 /*
2996 * all cleanup jobs (xgmi terminate, ras terminate,
2997 * ring destroy, cmd/fence/fw buffer destroy,
2998 * psp->cmd destroy) are deferred to psp_hw_fini
2999 */
3000 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3001 return ret;
3002}
3003
3004static int psp_hw_init(struct amdgpu_ip_block *ip_block)
3005{
3006 int ret;
3007 struct amdgpu_device *adev = ip_block->adev;
3008
3009 mutex_lock(&adev->firmware.mutex);
3010 /*
3011 * This sequence is only used once, during hw_init; it is not
3012 * needed on resume.
3013 */
3014 ret = amdgpu_ucode_init_bo(adev);
3015 if (ret)
3016 goto failed;
3017
3018 ret = psp_load_fw(adev);
3019 if (ret) {
3020 dev_err(adev->dev, "PSP firmware loading failed\n");
3021 goto failed;
3022 }
3023
3024 mutex_unlock(&adev->firmware.mutex);
3025 return 0;
3026
3027failed:
3028 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
3029 mutex_unlock(&adev->firmware.mutex);
3030 return -EINVAL;
3031}
3032
3033static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
3034{
3035 struct amdgpu_device *adev = ip_block->adev;
3036 struct psp_context *psp = &adev->psp;
3037
3038 if (psp->ta_fw) {
3039 psp_ras_terminate(psp);
3040 psp_securedisplay_terminate(psp);
3041 psp_rap_terminate(psp);
3042 psp_dtm_terminate(psp);
3043 psp_hdcp_terminate(psp);
3044
3045 if (adev->gmc.xgmi.num_physical_nodes > 1)
3046 psp_xgmi_terminate(psp);
3047 }
3048
3049 psp_asd_terminate(psp);
3050 psp_tmr_terminate(psp);
3051
3052 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3053
3054 return 0;
3055}
3056
3057static int psp_suspend(struct amdgpu_ip_block *ip_block)
3058{
3059 int ret = 0;
3060 struct amdgpu_device *adev = ip_block->adev;
3061 struct psp_context *psp = &adev->psp;
3062
3063 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3064 psp->xgmi_context.context.initialized) {
3065 ret = psp_xgmi_terminate(psp);
3066 if (ret) {
3067 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3068 goto out;
3069 }
3070 }
3071
3072 if (psp->ta_fw) {
3073 ret = psp_ras_terminate(psp);
3074 if (ret) {
3075 dev_err(adev->dev, "Failed to terminate ras ta\n");
3076 goto out;
3077 }
3078 ret = psp_hdcp_terminate(psp);
3079 if (ret) {
3080 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3081 goto out;
3082 }
3083 ret = psp_dtm_terminate(psp);
3084 if (ret) {
3085 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3086 goto out;
3087 }
3088 ret = psp_rap_terminate(psp);
3089 if (ret) {
3090 dev_err(adev->dev, "Failed to terminate rap ta\n");
3091 goto out;
3092 }
3093 ret = psp_securedisplay_terminate(psp);
3094 if (ret) {
3095 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3096 goto out;
3097 }
3098 }
3099
3100 ret = psp_asd_terminate(psp);
3101 if (ret) {
3102 dev_err(adev->dev, "Failed to terminate asd\n");
3103 goto out;
3104 }
3105
3106 ret = psp_tmr_terminate(psp);
3107 if (ret) {
3108 dev_err(adev->dev, "Failed to terminate tmr\n");
3109 goto out;
3110 }
3111
3112 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3113 if (ret)
3114 dev_err(adev->dev, "PSP ring stop failed\n");
3115
3116out:
3117 return ret;
3118}
3119
3120static int psp_resume(struct amdgpu_ip_block *ip_block)
3121{
3122 int ret;
3123 struct amdgpu_device *adev = ip_block->adev;
3124 struct psp_context *psp = &adev->psp;
3125
3126 dev_info(adev->dev, "PSP is resuming...\n");
3127
3128 if (psp->mem_train_ctx.enable_mem_training) {
3129 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3130 if (ret) {
3131 dev_err(adev->dev, "Failed to process memory training!\n");
3132 return ret;
3133 }
3134 }
3135
3136 mutex_lock(&adev->firmware.mutex);
3137
3138 ret = psp_hw_start(psp);
3139 if (ret)
3140 goto failed;
3141
3142 ret = psp_load_non_psp_fw(psp);
3143 if (ret)
3144 goto failed;
3145
3146 ret = psp_asd_initialize(psp);
3147 if (ret) {
3148 dev_err(adev->dev, "PSP load asd failed!\n");
3149 goto failed;
3150 }
3151
3152 ret = psp_rl_load(adev);
3153 if (ret) {
3154 dev_err(adev->dev, "PSP load RL failed!\n");
3155 goto failed;
3156 }
3157
3158 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3159 ret = psp_xgmi_initialize(psp, false, true);
3160 /* Warn about an XGMI session initialization failure
3161 * instead of stopping driver initialization
3162 */
3163 if (ret)
3164 dev_err(psp->adev->dev,
3165 "XGMI: Failed to initialize XGMI session\n");
3166 }
3167
3168 if (psp->ta_fw) {
3169 ret = psp_ras_initialize(psp);
3170 if (ret)
3171 dev_err(psp->adev->dev,
3172 "RAS: Failed to initialize RAS\n");
3173
3174 ret = psp_hdcp_initialize(psp);
3175 if (ret)
3176 dev_err(psp->adev->dev,
3177 "HDCP: Failed to initialize HDCP\n");
3178
3179 ret = psp_dtm_initialize(psp);
3180 if (ret)
3181 dev_err(psp->adev->dev,
3182 "DTM: Failed to initialize DTM\n");
3183
3184 ret = psp_rap_initialize(psp);
3185 if (ret)
3186 dev_err(psp->adev->dev,
3187 "RAP: Failed to initialize RAP\n");
3188
3189 ret = psp_securedisplay_initialize(psp);
3190 if (ret)
3191 dev_err(psp->adev->dev,
3192 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3193 }
3194
3195 mutex_unlock(&adev->firmware.mutex);
3196
3197 return 0;
3198
3199failed:
3200 dev_err(adev->dev, "PSP resume failed\n");
3201 mutex_unlock(&adev->firmware.mutex);
3202 return ret;
3203}
3204
3205int psp_gpu_reset(struct amdgpu_device *adev)
3206{
3207 int ret;
3208
3209 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3210 return 0;
3211
3212 mutex_lock(&adev->psp.mutex);
3213 ret = psp_mode1_reset(&adev->psp);
3214 mutex_unlock(&adev->psp.mutex);
3215
3216 return ret;
3217}
3218
3219int psp_rlc_autoload_start(struct psp_context *psp)
3220{
3221 int ret;
3222 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3223
3224 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3225
3226 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3227 psp->fence_buf_mc_addr);
3228
3229 release_psp_cmd_buf(psp);
3230
3231 return ret;
3232}
3233
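/*
 * Submit one command to the PSP KM ring: locate the next ring-buffer frame
 * from the current write pointer, sanity-check it against the ring bounds,
 * fill in the command/fence GPU addresses and the fence value, flush HDP,
 * and advance the write pointer (in DWORDs).
 */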
3234int psp_ring_cmd_submit(struct psp_context *psp,
3235 uint64_t cmd_buf_mc_addr,
3236 uint64_t fence_mc_addr,
3237 int index)
3238{
3239 unsigned int psp_write_ptr_reg = 0;
3240 struct psp_gfx_rb_frame *write_frame;
3241 struct psp_ring *ring = &psp->km_ring;
3242 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3243 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3244 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3245 struct amdgpu_device *adev = psp->adev;
3246 uint32_t ring_size_dw = ring->ring_size / 4;
3247 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3248
3249 /* KM (GPCOM) prepare write pointer */
3250 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3251
3252 /* Update KM RB frame pointer to new frame */
3253 /* write_frame ptr increments by size of rb_frame in bytes */
3254 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3255 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3256 write_frame = ring_buffer_start;
3257 else
3258 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3259 /* Check invalid write_frame ptr address */
3260 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3261 dev_err(adev->dev,
3262 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3263 ring_buffer_start, ring_buffer_end, write_frame);
3264 dev_err(adev->dev,
3265 "write_frame is pointing to address out of bounds\n");
3266 return -EINVAL;
3267 }
3268
3269 /* Initialize KM RB frame */
3270 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3271
3272 /* Update KM RB frame */
3273 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3274 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3275 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3276 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3277 write_frame->fence_value = index;
3278 amdgpu_device_flush_hdp(adev, NULL);
3279
3280 /* Update the write Pointer in DWORDs */
3281 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3282 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3283 return 0;
3284}
3285
3286int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3287{
3288 struct amdgpu_device *adev = psp->adev;
3289 const struct psp_firmware_header_v1_0 *asd_hdr;
3290 int err = 0;
3291
3292 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
3293 if (err)
3294 goto out;
3295
3296 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3297 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3298 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3299 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3300 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3301 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3302 return 0;
3303out:
3304 amdgpu_ucode_release(&adev->psp.asd_fw);
3305 return err;
3306}
3307
3308int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3309{
3310 struct amdgpu_device *adev = psp->adev;
3311 const struct psp_firmware_header_v1_0 *toc_hdr;
3312 int err = 0;
3313
3314 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
3315 if (err)
3316 goto out;
3317
3318 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3319 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3320 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3321 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3322 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3323 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3324 return 0;
3325out:
3326 amdgpu_ucode_release(&adev->psp.toc_fw);
3327 return err;
3328}
3329
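/*
 * Decode one firmware descriptor from a v2 SOS header and record its
 * version, size and start address in the matching psp_context bin image
 * (SOS, SYS_DRV, KDB, TOC, SPL, RL, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV
 * or IPKEYMGR_DRV); unknown types only produce a warning.
 */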
3330static int parse_sos_bin_descriptor(struct psp_context *psp,
3331 const struct psp_fw_bin_desc *desc,
3332 const struct psp_firmware_header_v2_0 *sos_hdr)
3333{
3334 uint8_t *ucode_start_addr = NULL;
3335
3336 if (!psp || !desc || !sos_hdr)
3337 return -EINVAL;
3338
3339 ucode_start_addr = (uint8_t *)sos_hdr +
3340 le32_to_cpu(desc->offset_bytes) +
3341 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3342
3343 switch (desc->fw_type) {
3344 case PSP_FW_TYPE_PSP_SOS:
3345 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3346 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3347 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3348 psp->sos.start_addr = ucode_start_addr;
3349 break;
3350 case PSP_FW_TYPE_PSP_SYS_DRV:
3351 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3352 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3353 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3354 psp->sys.start_addr = ucode_start_addr;
3355 break;
3356 case PSP_FW_TYPE_PSP_KDB:
3357 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3358 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3359 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3360 psp->kdb.start_addr = ucode_start_addr;
3361 break;
3362 case PSP_FW_TYPE_PSP_TOC:
3363 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3364 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3365 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3366 psp->toc.start_addr = ucode_start_addr;
3367 break;
3368 case PSP_FW_TYPE_PSP_SPL:
3369 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3370 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3371 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3372 psp->spl.start_addr = ucode_start_addr;
3373 break;
3374 case PSP_FW_TYPE_PSP_RL:
3375 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3376 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3377 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3378 psp->rl.start_addr = ucode_start_addr;
3379 break;
3380 case PSP_FW_TYPE_PSP_SOC_DRV:
3381 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3382 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3383 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3384 psp->soc_drv.start_addr = ucode_start_addr;
3385 break;
3386 case PSP_FW_TYPE_PSP_INTF_DRV:
3387 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3388 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3389 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3390 psp->intf_drv.start_addr = ucode_start_addr;
3391 break;
3392 case PSP_FW_TYPE_PSP_DBG_DRV:
3393 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3394 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3395 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3396 psp->dbg_drv.start_addr = ucode_start_addr;
3397 break;
3398 case PSP_FW_TYPE_PSP_RAS_DRV:
3399 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3400 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3401 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3402 psp->ras_drv.start_addr = ucode_start_addr;
3403 break;
3404 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3405 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3406 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3407 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3408 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3409 break;
3410 default:
3411 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3412 break;
3413 }
3414
3415 return 0;
3416}
3417
3418static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3419{
3420 const struct psp_firmware_header_v1_0 *sos_hdr;
3421 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3422 uint8_t *ucode_array_start_addr;
3423
3424 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3425 ucode_array_start_addr = (uint8_t *)sos_hdr +
3426 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3427
3428 if (adev->gmc.xgmi.connected_to_cpu ||
3429 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3430 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3431 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3432
3433 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3434 adev->psp.sys.start_addr = ucode_array_start_addr;
3435
3436 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3437 adev->psp.sos.start_addr = ucode_array_start_addr +
3438 le32_to_cpu(sos_hdr->sos.offset_bytes);
3439 } else {
3440 /* Load alternate PSP SOS FW */
3441 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3442
3443 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3444 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3445
3446 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3447 adev->psp.sys.start_addr = ucode_array_start_addr +
3448 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3449
3450 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3451 adev->psp.sos.start_addr = ucode_array_start_addr +
3452 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3453 }
3454
3455 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3456 dev_warn(adev->dev, "PSP SOS FW not available");
3457 return -EINVAL;
3458 }
3459
3460 return 0;
3461}
3462
3463int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3464{
3465 struct amdgpu_device *adev = psp->adev;
3466 const struct psp_firmware_header_v1_0 *sos_hdr;
3467 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3468 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3469 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3470 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3471 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3472 int fw_index, fw_bin_count, start_index = 0;
3473 const struct psp_fw_bin_desc *fw_bin;
3474 uint8_t *ucode_array_start_addr;
3475 int err = 0;
3476
3477 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
3478 if (err)
3479 goto out;
3480
3481 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3482 ucode_array_start_addr = (uint8_t *)sos_hdr +
3483 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3484 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3485
3486 switch (sos_hdr->header.header_version_major) {
3487 case 1:
3488 err = psp_init_sos_base_fw(adev);
3489 if (err)
3490 goto out;
3491
3492 if (sos_hdr->header.header_version_minor == 1) {
3493 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3494 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3495 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3496 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3497 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3498 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3499 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3500 }
3501 if (sos_hdr->header.header_version_minor == 2) {
3502 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3503 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3504 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3505 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3506 }
3507 if (sos_hdr->header.header_version_minor == 3) {
3508 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3509 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3510 adev->psp.toc.start_addr = ucode_array_start_addr +
3511 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3512 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3513 adev->psp.kdb.start_addr = ucode_array_start_addr +
3514 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3515 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3516 adev->psp.spl.start_addr = ucode_array_start_addr +
3517 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3518 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3519 adev->psp.rl.start_addr = ucode_array_start_addr +
3520 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3521 }
3522 break;
3523 case 2:
3524 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3525
3526 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3527
3528 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3529 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3530 err = -EINVAL;
3531 goto out;
3532 }
3533
3534 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3535 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3536
3537 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3538
3539 if (psp_is_aux_sos_load_required(psp))
3540 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3541 else
3542 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3543
3544 } else {
3545 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3546 }
3547
3548 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3549 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3550 sos_hdr_v2_0);
3551 if (err)
3552 goto out;
3553 }
3554 break;
3555 default:
3556 dev_err(adev->dev,
3557 "unsupported psp sos firmware\n");
3558 err = -EINVAL;
3559 goto out;
3560 }
3561
3562 return 0;
3563out:
3564 amdgpu_ucode_release(&adev->psp.sos_fw);
3565
3566 return err;
3567}
3568
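/*
 * Some TA packages carry both a regular and an AUX variant of the same TA;
 * pick the variant that matches the current configuration. For now only the
 * XGMI TA on MP0 13.0.6 is affected.
 */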
3569static bool is_ta_fw_applicable(struct psp_context *psp,
3570 const struct psp_fw_bin_desc *desc)
3571{
3572 struct amdgpu_device *adev = psp->adev;
3573 uint32_t fw_version;
3574
3575 switch (desc->fw_type) {
3576 case TA_FW_TYPE_PSP_XGMI:
3577 case TA_FW_TYPE_PSP_XGMI_AUX:
3578 /* for now, AUX TA only exists on 13.0.6 ta bin,
3579 * from v20.00.0x.14
3580 */
3581 if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
3582 IP_VERSION(13, 0, 6)) {
3583 fw_version = le32_to_cpu(desc->fw_version);
3584
3585			if ((adev->flags & AMD_IS_APU) &&
3586			    (fw_version & 0xff) >= 0x14)
3587 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
3588 else
3589 return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
3590 }
3591 break;
3592 default:
3593 break;
3594 }
3595
3596 return true;
3597}
3598
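/* Copy one packed TA bin descriptor into the matching per-TA context. */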
3599static int parse_ta_bin_descriptor(struct psp_context *psp,
3600 const struct psp_fw_bin_desc *desc,
3601 const struct ta_firmware_header_v2_0 *ta_hdr)
3602{
3603 uint8_t *ucode_start_addr = NULL;
3604
3605 if (!psp || !desc || !ta_hdr)
3606 return -EINVAL;
3607
3608 if (!is_ta_fw_applicable(psp, desc))
3609 return 0;
3610
3611 ucode_start_addr = (uint8_t *)ta_hdr +
3612 le32_to_cpu(desc->offset_bytes) +
3613 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3614
3615 switch (desc->fw_type) {
3616 case TA_FW_TYPE_PSP_ASD:
3617 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3618 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3619 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3620 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3621 break;
3622 case TA_FW_TYPE_PSP_XGMI:
3623 case TA_FW_TYPE_PSP_XGMI_AUX:
3624 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3625 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3626 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3627 break;
3628 case TA_FW_TYPE_PSP_RAS:
3629 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3630 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3631 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3632 break;
3633 case TA_FW_TYPE_PSP_HDCP:
3634 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3635 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3636 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3637 break;
3638 case TA_FW_TYPE_PSP_DTM:
3639 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3640 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3641 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3642 break;
3643 case TA_FW_TYPE_PSP_RAP:
3644 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3645 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3646 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3647 break;
3648 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3649 psp->securedisplay_context.context.bin_desc.fw_version =
3650 le32_to_cpu(desc->fw_version);
3651 psp->securedisplay_context.context.bin_desc.size_bytes =
3652 le32_to_cpu(desc->size_bytes);
3653 psp->securedisplay_context.context.bin_desc.start_addr =
3654 ucode_start_addr;
3655 break;
3656 default:
3657 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3658 break;
3659 }
3660
3661 return 0;
3662}
3663
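/*
 * v1 TA headers use a fixed layout with XGMI, RAS, HDCP, DTM and
 * SECUREDISPLAY entries at known positions instead of a descriptor table.
 */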
3664static int parse_ta_v1_microcode(struct psp_context *psp)
3665{
3666 const struct ta_firmware_header_v1_0 *ta_hdr;
3667 struct amdgpu_device *adev = psp->adev;
3668
3669 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3670
3671 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3672 return -EINVAL;
3673
3674 adev->psp.xgmi_context.context.bin_desc.fw_version =
3675 le32_to_cpu(ta_hdr->xgmi.fw_version);
3676 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3677 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3678 adev->psp.xgmi_context.context.bin_desc.start_addr =
3679 (uint8_t *)ta_hdr +
3680 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3681
3682 adev->psp.ras_context.context.bin_desc.fw_version =
3683 le32_to_cpu(ta_hdr->ras.fw_version);
3684 adev->psp.ras_context.context.bin_desc.size_bytes =
3685 le32_to_cpu(ta_hdr->ras.size_bytes);
3686 adev->psp.ras_context.context.bin_desc.start_addr =
3687 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3688 le32_to_cpu(ta_hdr->ras.offset_bytes);
3689
3690 adev->psp.hdcp_context.context.bin_desc.fw_version =
3691 le32_to_cpu(ta_hdr->hdcp.fw_version);
3692 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3693 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3694 adev->psp.hdcp_context.context.bin_desc.start_addr =
3695 (uint8_t *)ta_hdr +
3696 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3697
3698 adev->psp.dtm_context.context.bin_desc.fw_version =
3699 le32_to_cpu(ta_hdr->dtm.fw_version);
3700 adev->psp.dtm_context.context.bin_desc.size_bytes =
3701 le32_to_cpu(ta_hdr->dtm.size_bytes);
3702 adev->psp.dtm_context.context.bin_desc.start_addr =
3703 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3704 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3705
3706 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3707 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3708 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3709 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3710 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3711 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3712 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3713
3714 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3715
3716 return 0;
3717}
3718
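/* v2 TA headers carry a counted table of TA bin descriptors. */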
3719static int parse_ta_v2_microcode(struct psp_context *psp)
3720{
3721 const struct ta_firmware_header_v2_0 *ta_hdr;
3722 struct amdgpu_device *adev = psp->adev;
3723 int err = 0;
3724 int ta_index = 0;
3725
3726 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3727
3728 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3729 return -EINVAL;
3730
3731 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3732 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3733 return -EINVAL;
3734 }
3735
3736 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3737 err = parse_ta_bin_descriptor(psp,
3738 &ta_hdr->ta_fw_bin[ta_index],
3739 ta_hdr);
3740 if (err)
3741 return err;
3742 }
3743
3744 return 0;
3745}
3746
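/* Request amdgpu/<chip>_ta.bin and dispatch to the v1 or v2 parser. */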
3747int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3748{
3749 const struct common_firmware_header *hdr;
3750 struct amdgpu_device *adev = psp->adev;
3751 int err;
3752
3753 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
3754 if (err)
3755 return err;
3756
3757 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3758 switch (le16_to_cpu(hdr->header_version_major)) {
3759 case 1:
3760 err = parse_ta_v1_microcode(psp);
3761 break;
3762 case 2:
3763 err = parse_ta_v2_microcode(psp);
3764 break;
3765 default:
3766 dev_err(adev->dev, "unsupported TA header version\n");
3767 err = -EINVAL;
3768 }
3769
3770 if (err)
3771 amdgpu_ucode_release(&adev->psp.ta_fw);
3772
3773 return err;
3774}
3775
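/*
 * CAP firmware is only used under SRIOV and is optional: a missing
 * amdgpu/<chip>_cap.bin is skipped rather than treated as an error.
 */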
3776int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3777{
3778 struct amdgpu_device *adev = psp->adev;
3779 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3780 struct amdgpu_firmware_info *info = NULL;
3781 int err = 0;
3782
3783 if (!amdgpu_sriov_vf(adev)) {
3784 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3785 return -EINVAL;
3786 }
3787
3788 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
3789 if (err) {
3790 if (err == -ENODEV) {
3791 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3792 err = 0;
3793 } else {
3794 dev_err(adev->dev, "fail to initialize cap microcode\n");
3795 }
3796 goto out;
3797 }
3798
3799 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3800 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3801 info->fw = adev->psp.cap_fw;
3802 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3803 adev->psp.cap_fw->data;
3804 adev->firmware.fw_size += ALIGN(
3805 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3806 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3807 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3808 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3809
3810 return 0;
3811
3812out:
3813 amdgpu_ucode_release(&adev->psp.cap_fw);
3814 return err;
3815}
3816
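/*
 * Ask the PSP to apply SQ perfmon overrides for a single XCP. Only supported
 * on bare-metal MP0 13.0.6; silently ignored under SRIOV.
 */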
3817int psp_config_sq_perfmon(struct psp_context *psp,
3818 uint32_t xcp_id, bool core_override_enable,
3819 bool reg_override_enable, bool perfmon_override_enable)
3820{
3821	struct psp_gfx_cmd_resp *cmd;
	int ret;
3822
3823 if (amdgpu_sriov_vf(psp->adev))
3824 return 0;
3825
3826 if (xcp_id > MAX_XCP) {
3827		dev_err(psp->adev->dev, "invalid xcp_id %u\n", xcp_id);
3828 return -EINVAL;
3829 }
3830
3831 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
3832 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
3833 amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
3834 return -EINVAL;
3835 }
3836	cmd = acquire_psp_cmd_buf(psp);
3837
3838 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
3839 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
3840 cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
3841 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
3842 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
3843
3844 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
3845 if (ret)
3846 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
3847 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
3848
3849 release_psp_cmd_buf(psp);
3850 return ret;
3851}
3852
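/* PSP has no clock/power gating of its own; these callbacks are stubs. */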
3853static int psp_set_clockgating_state(void *handle,
3854 enum amd_clockgating_state state)
3855{
3856 return 0;
3857}
3858
3859static int psp_set_powergating_state(void *handle,
3860 enum amd_powergating_state state)
3861{
3862 return 0;
3863}
3864
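/* sysfs read handler: query the current USB-C PD firmware version from PSP. */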
3865static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3866 struct device_attribute *attr,
3867 char *buf)
3868{
3869 struct drm_device *ddev = dev_get_drvdata(dev);
3870 struct amdgpu_device *adev = drm_to_adev(ddev);
3871 uint32_t fw_ver;
3872 int ret;
3873
3874 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3875		dev_info(adev->dev, "PSP block is not ready yet.\n");
3876 return -EBUSY;
3877 }
3878
3879 mutex_lock(&adev->psp.mutex);
3880 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3881 mutex_unlock(&adev->psp.mutex);
3882
3883 if (ret) {
3884 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3885 return ret;
3886 }
3887
3888 return sysfs_emit(buf, "%x\n", fw_ver);
3889}
3890
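/*
 * sysfs write handler: the written string names a firmware file under
 * amdgpu/ in the firmware search path; it is copied into a 1MB-aligned
 * buffer and handed to the PSP for flashing.
 */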
3891static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3892 struct device_attribute *attr,
3893 const char *buf,
3894 size_t count)
3895{
3896 struct drm_device *ddev = dev_get_drvdata(dev);
3897 struct amdgpu_device *adev = drm_to_adev(ddev);
3898 int ret, idx;
3899 const struct firmware *usbc_pd_fw;
3900 struct amdgpu_bo *fw_buf_bo = NULL;
3901 uint64_t fw_pri_mc_addr;
3902 void *fw_pri_cpu_addr;
3903
3904 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3905		dev_err(adev->dev, "PSP block is not ready yet.\n");
3906 return -EBUSY;
3907 }
3908
3909 if (!drm_dev_enter(ddev, &idx))
3910 return -ENODEV;
3911
3912 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
3913 if (ret)
3914 goto fail;
3915
3916 /* LFB address which is aligned to 1MB boundary per PSP request */
3917 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3918 AMDGPU_GEM_DOMAIN_VRAM |
3919 AMDGPU_GEM_DOMAIN_GTT,
3920 &fw_buf_bo, &fw_pri_mc_addr,
3921 &fw_pri_cpu_addr);
3922 if (ret)
3923 goto rel_buf;
3924
3925 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3926
3927 mutex_lock(&adev->psp.mutex);
3928 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3929 mutex_unlock(&adev->psp.mutex);
3930
3931 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3932
3933rel_buf:
3934 amdgpu_ucode_release(&usbc_pd_fw);
3935fail:
3936 if (ret) {
3937		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3938 count = ret;
3939 }
3940
3941 drm_dev_exit(idx);
3942 return count;
3943}
3944
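/* Stage a firmware image in the PSP private (fw_pri) buffer for loading. */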
3945void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3946{
3947 int idx;
3948
3949 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3950 return;
3951
3952 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3953 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3954
3955 drm_dev_exit(idx);
3956}
3957
3958/**
3959 * DOC: usbc_pd_fw
3960 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3961 * this file will trigger the update process.
3962 */
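/*
 * Illustrative usage from user space (the exact sysfs path depends on the
 * card index / PCI address):
 *   cat /sys/class/drm/card0/device/usbc_pd_fw                  # read FW version
 *   echo <pd_fw_file> > /sys/class/drm/card0/device/usbc_pd_fw  # flash it
 * where <pd_fw_file> is looked up as amdgpu/<pd_fw_file> by the firmware
 * loader.
 */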
3963static DEVICE_ATTR(usbc_pd_fw, 0644,
3964 psp_usbc_pd_fw_sysfs_read,
3965 psp_usbc_pd_fw_sysfs_write);
3966
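/* A bin descriptor is considered valid once a non-zero size has been parsed. */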
3967int is_psp_fw_valid(struct psp_bin_desc bin)
3968{
3969 return bin.size_bytes;
3970}
3971
3972static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3973 struct bin_attribute *bin_attr,
3974 char *buffer, loff_t pos, size_t count)
3975{
3976 struct device *dev = kobj_to_dev(kobj);
3977 struct drm_device *ddev = dev_get_drvdata(dev);
3978 struct amdgpu_device *adev = drm_to_adev(ddev);
3979
3980 adev->psp.vbflash_done = false;
3981
3982	/* Safeguard against memory drain and staging buffer overrun */
3983	if (pos + count > AMD_VBIOS_FILE_MAX_SIZE_B) {
3984 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3985 kvfree(adev->psp.vbflash_tmp_buf);
3986 adev->psp.vbflash_tmp_buf = NULL;
3987 adev->psp.vbflash_image_size = 0;
3988 return -ENOMEM;
3989 }
3990
3991 /* TODO Just allocate max for now and optimize to realloc later if needed */
3992 if (!adev->psp.vbflash_tmp_buf) {
3993 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3994 if (!adev->psp.vbflash_tmp_buf)
3995 return -ENOMEM;
3996 }
3997
3998 mutex_lock(&adev->psp.mutex);
3999 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
4000 adev->psp.vbflash_image_size += count;
4001 mutex_unlock(&adev->psp.mutex);
4002
4003 dev_dbg(adev->dev, "IFWI staged for update\n");
4004
4005 return count;
4006}
4007
4008static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
4009 struct bin_attribute *bin_attr, char *buffer,
4010 loff_t pos, size_t count)
4011{
4012 struct device *dev = kobj_to_dev(kobj);
4013 struct drm_device *ddev = dev_get_drvdata(dev);
4014 struct amdgpu_device *adev = drm_to_adev(ddev);
4015 struct amdgpu_bo *fw_buf_bo = NULL;
4016 uint64_t fw_pri_mc_addr;
4017 void *fw_pri_cpu_addr;
4018 int ret;
4019
4020 if (adev->psp.vbflash_image_size == 0)
4021 return -EINVAL;
4022
4023 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
4024
4025 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
4026 AMDGPU_GPU_PAGE_SIZE,
4027 AMDGPU_GEM_DOMAIN_VRAM,
4028 &fw_buf_bo,
4029 &fw_pri_mc_addr,
4030 &fw_pri_cpu_addr);
4031 if (ret)
4032 goto rel_buf;
4033
4034 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
4035
4036 mutex_lock(&adev->psp.mutex);
4037 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
4038 mutex_unlock(&adev->psp.mutex);
4039
4040 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
4041
4042rel_buf:
4043 kvfree(adev->psp.vbflash_tmp_buf);
4044 adev->psp.vbflash_tmp_buf = NULL;
4045 adev->psp.vbflash_image_size = 0;
4046
4047 if (ret) {
4048 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
4049 return ret;
4050 }
4051
4052 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
4053 return 0;
4054}
4055
4056/**
4057 * DOC: psp_vbflash
4058 * Writing to this file will stage an IFWI for update. Reading from this file
4059 * will trigger the update process.
4060 */
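/*
 * Illustrative flashing flow from user space (paths are examples only and
 * depend on the actual PCI device):
 *   cat ifwi.bin > /sys/bus/pci/devices/<bdf>/psp_vbflash    # stage image
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash               # start flash
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status        # poll status
 */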
4061static struct bin_attribute psp_vbflash_bin_attr = {
4062 .attr = {.name = "psp_vbflash", .mode = 0660},
4063 .size = 0,
4064 .write = amdgpu_psp_vbflash_write,
4065 .read = amdgpu_psp_vbflash_read,
4066};
4067
4068/**
4069 * DOC: psp_vbflash_status
4070 * The status of the flash process.
4071 * 0: IFWI flash not complete.
4072 * 1: IFWI flash complete.
4073 */
4074static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
4075 struct device_attribute *attr,
4076 char *buf)
4077{
4078 struct drm_device *ddev = dev_get_drvdata(dev);
4079 struct amdgpu_device *adev = drm_to_adev(ddev);
4080 uint32_t vbflash_status;
4081
4082 vbflash_status = psp_vbflash_status(&adev->psp);
4083 if (!adev->psp.vbflash_done)
4084 vbflash_status = 0;
4085 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
4086 vbflash_status = 1;
4087
4088 return sysfs_emit(buf, "0x%x\n", vbflash_status);
4089}
4090static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
4091
4092static struct bin_attribute *bin_flash_attrs[] = {
4093 &psp_vbflash_bin_attr,
4094 NULL
4095};
4096
4097static struct attribute *flash_attrs[] = {
4098 &dev_attr_psp_vbflash_status.attr,
4099 &dev_attr_usbc_pd_fw.attr,
4100 NULL
4101};
4102
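/*
 * usbc_pd_fw is exposed only when PD firmware update is supported; the
 * remaining flash attributes require IFWI update support.
 */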
4103static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
4104{
4105 struct device *dev = kobj_to_dev(kobj);
4106 struct drm_device *ddev = dev_get_drvdata(dev);
4107 struct amdgpu_device *adev = drm_to_adev(ddev);
4108
4109 if (attr == &dev_attr_usbc_pd_fw.attr)
4110 return adev->psp.sup_pd_fw_up ? 0660 : 0;
4111
4112 return adev->psp.sup_ifwi_up ? 0440 : 0;
4113}
4114
4115static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4116 const struct bin_attribute *attr,
4117 int idx)
4118{
4119 struct device *dev = kobj_to_dev(kobj);
4120 struct drm_device *ddev = dev_get_drvdata(dev);
4121 struct amdgpu_device *adev = drm_to_adev(ddev);
4122
4123 return adev->psp.sup_ifwi_up ? 0660 : 0;
4124}
4125
4126const struct attribute_group amdgpu_flash_attr_group = {
4127 .attrs = flash_attrs,
4128 .bin_attrs = bin_flash_attrs,
4129 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4130 .is_visible = amdgpu_flash_attr_is_visible,
4131};
4132
4133const struct amd_ip_funcs psp_ip_funcs = {
4134 .name = "psp",
4135 .early_init = psp_early_init,
4136 .sw_init = psp_sw_init,
4137 .sw_fini = psp_sw_fini,
4138 .hw_init = psp_hw_init,
4139 .hw_fini = psp_hw_fini,
4140 .suspend = psp_suspend,
4141 .resume = psp_resume,
4142 .set_clockgating_state = psp_set_clockgating_state,
4143 .set_powergating_state = psp_set_powergating_state,
4144};
4145
4146const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4147 .type = AMD_IP_BLOCK_TYPE_PSP,
4148 .major = 3,
4149 .minor = 1,
4150 .rev = 0,
4151 .funcs = &psp_ip_funcs,
4152};
4153
4154const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4155 .type = AMD_IP_BLOCK_TYPE_PSP,
4156 .major = 10,
4157 .minor = 0,
4158 .rev = 0,
4159 .funcs = &psp_ip_funcs,
4160};
4161
4162const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4163 .type = AMD_IP_BLOCK_TYPE_PSP,
4164 .major = 11,
4165 .minor = 0,
4166 .rev = 0,
4167 .funcs = &psp_ip_funcs,
4168};
4169
4170const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4171 .type = AMD_IP_BLOCK_TYPE_PSP,
4172 .major = 11,
4173 .minor = 0,
4174 .rev = 8,
4175 .funcs = &psp_ip_funcs,
4176};
4177
4178const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4179 .type = AMD_IP_BLOCK_TYPE_PSP,
4180 .major = 12,
4181 .minor = 0,
4182 .rev = 0,
4183 .funcs = &psp_ip_funcs,
4184};
4185
4186const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4187 .type = AMD_IP_BLOCK_TYPE_PSP,
4188 .major = 13,
4189 .minor = 0,
4190 .rev = 0,
4191 .funcs = &psp_ip_funcs,
4192};
4193
4194const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4195 .type = AMD_IP_BLOCK_TYPE_PSP,
4196 .major = 13,
4197 .minor = 0,
4198 .rev = 4,
4199 .funcs = &psp_ip_funcs,
4200};
4201
4202const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4203 .type = AMD_IP_BLOCK_TYPE_PSP,
4204 .major = 14,
4205 .minor = 0,
4206 .rev = 0,
4207 .funcs = &psp_ip_funcs,
4208};