v6.9.4
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Author: Huang Rui
  23 *
  24 */
  25
  26#include <linux/firmware.h>
  27#include <drm/drm_drv.h>
  28
  29#include "amdgpu.h"
  30#include "amdgpu_psp.h"
  31#include "amdgpu_ucode.h"
  32#include "amdgpu_xgmi.h"
  33#include "soc15_common.h"
  34#include "psp_v3_1.h"
  35#include "psp_v10_0.h"
  36#include "psp_v11_0.h"
  37#include "psp_v11_0_8.h"
  38#include "psp_v12_0.h"
  39#include "psp_v13_0.h"
  40#include "psp_v13_0_4.h"
  41#include "psp_v14_0.h"
  42
  43#include "amdgpu_ras.h"
  44#include "amdgpu_securedisplay.h"
  45#include "amdgpu_atomfirmware.h"
  46
   47#define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*3)
  48
  49static int psp_load_smu_fw(struct psp_context *psp);
  50static int psp_rap_terminate(struct psp_context *psp);
  51static int psp_securedisplay_terminate(struct psp_context *psp);
  52
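/* Allocate a single 4 KiB buffer in VRAM (falling back to GTT) for the PSP KM
 * ring and record its MC address and CPU pointer in the ring descriptor.
 */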
  53static int psp_ring_init(struct psp_context *psp,
  54			 enum psp_ring_type ring_type)
  55{
  56	int ret = 0;
  57	struct psp_ring *ring;
  58	struct amdgpu_device *adev = psp->adev;
  59
  60	ring = &psp->km_ring;
  61
  62	ring->ring_type = ring_type;
  63
  64	/* allocate 4k Page of Local Frame Buffer memory for ring */
  65	ring->ring_size = 0x1000;
  66	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
  67				      AMDGPU_GEM_DOMAIN_VRAM |
  68				      AMDGPU_GEM_DOMAIN_GTT,
  69				      &adev->firmware.rbuf,
  70				      &ring->ring_mem_mc_addr,
  71				      (void **)&ring->ring_mem);
  72	if (ret) {
  73		ring->ring_size = 0;
  74		return ret;
  75	}
  76
  77	return 0;
  78}
  79
  80/*
   81 * Because DF C-state management is centralized in the PMFW, the firmware
   82 * loading sequence is updated as below:
  83 *   - Load KDB
  84 *   - Load SYS_DRV
  85 *   - Load tOS
  86 *   - Load PMFW
  87 *   - Setup TMR
  88 *   - Load other non-psp fw
  89 *   - Load ASD
  90 *   - Load XGMI/RAS/HDCP/DTM TA if any
  91 *
  92 * This new sequence is required for
   93 *   - Arcturus and onwards
  94 */
  95static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
  96{
  97	struct amdgpu_device *adev = psp->adev;
  98
  99	if (amdgpu_sriov_vf(adev)) {
 100		psp->pmfw_centralized_cstate_management = false;
 101		return;
 102	}
 103
 104	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 105	case IP_VERSION(11, 0, 0):
 106	case IP_VERSION(11, 0, 4):
 107	case IP_VERSION(11, 0, 5):
 108	case IP_VERSION(11, 0, 7):
 109	case IP_VERSION(11, 0, 9):
 110	case IP_VERSION(11, 0, 11):
 111	case IP_VERSION(11, 0, 12):
 112	case IP_VERSION(11, 0, 13):
 113	case IP_VERSION(13, 0, 0):
 114	case IP_VERSION(13, 0, 2):
 115	case IP_VERSION(13, 0, 7):
 116		psp->pmfw_centralized_cstate_management = true;
 117		break;
 118	default:
 119		psp->pmfw_centralized_cstate_management = false;
 120		break;
 121	}
 122}
 123
 124static int psp_init_sriov_microcode(struct psp_context *psp)
 125{
 126	struct amdgpu_device *adev = psp->adev;
 127	char ucode_prefix[30];
 128	int ret = 0;
 129
  130	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
 131
 132	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 133	case IP_VERSION(9, 0, 0):
 134	case IP_VERSION(11, 0, 7):
 135	case IP_VERSION(11, 0, 9):
 136		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
 137		ret = psp_init_cap_microcode(psp, ucode_prefix);
 138		break;
 139	case IP_VERSION(13, 0, 2):
 140		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
 141		ret = psp_init_cap_microcode(psp, ucode_prefix);
 142		ret &= psp_init_ta_microcode(psp, ucode_prefix);
 143		break;
 144	case IP_VERSION(13, 0, 0):
 145		adev->virt.autoload_ucode_id = 0;
 146		break;
 147	case IP_VERSION(13, 0, 6):
 148		ret = psp_init_cap_microcode(psp, ucode_prefix);
 149		ret &= psp_init_ta_microcode(psp, ucode_prefix);
 150		break;
 151	case IP_VERSION(13, 0, 10):
 152		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
 153		ret = psp_init_cap_microcode(psp, ucode_prefix);
 154		break;
 155	default:
 156		return -EINVAL;
 157	}
 158	return ret;
 159}
 160
 161static int psp_early_init(void *handle)
 162{
 163	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 164	struct psp_context *psp = &adev->psp;
 165
 166	psp->autoload_supported = true;
 167	psp->boot_time_tmr = true;
 168
 169	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 170	case IP_VERSION(9, 0, 0):
 171		psp_v3_1_set_psp_funcs(psp);
 172		psp->autoload_supported = false;
 173		psp->boot_time_tmr = false;
 174		break;
 175	case IP_VERSION(10, 0, 0):
 176	case IP_VERSION(10, 0, 1):
 177		psp_v10_0_set_psp_funcs(psp);
 178		psp->autoload_supported = false;
 179		psp->boot_time_tmr = false;
 180		break;
 181	case IP_VERSION(11, 0, 2):
 182	case IP_VERSION(11, 0, 4):
 183		psp_v11_0_set_psp_funcs(psp);
 184		psp->autoload_supported = false;
 185		psp->boot_time_tmr = false;
 186		break;
 187	case IP_VERSION(11, 0, 0):
 188	case IP_VERSION(11, 0, 7):
 189		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
 190		fallthrough;
 191	case IP_VERSION(11, 0, 5):
 192	case IP_VERSION(11, 0, 9):
 193	case IP_VERSION(11, 0, 11):
 194	case IP_VERSION(11, 5, 0):
 195	case IP_VERSION(11, 0, 12):
 196	case IP_VERSION(11, 0, 13):
 197		psp_v11_0_set_psp_funcs(psp);
 198		psp->boot_time_tmr = false;
 199		break;
 200	case IP_VERSION(11, 0, 3):
 201	case IP_VERSION(12, 0, 1):
 202		psp_v12_0_set_psp_funcs(psp);
 203		psp->autoload_supported = false;
 204		psp->boot_time_tmr = false;
 205		break;
 206	case IP_VERSION(13, 0, 2):
 207		psp->boot_time_tmr = false;
 208		fallthrough;
 209	case IP_VERSION(13, 0, 6):
 210		psp_v13_0_set_psp_funcs(psp);
 211		psp->autoload_supported = false;
 212		break;
 213	case IP_VERSION(13, 0, 1):
 214	case IP_VERSION(13, 0, 3):
 215	case IP_VERSION(13, 0, 5):
 216	case IP_VERSION(13, 0, 8):
 217	case IP_VERSION(13, 0, 11):
 218	case IP_VERSION(14, 0, 0):
 219	case IP_VERSION(14, 0, 1):
 220		psp_v13_0_set_psp_funcs(psp);
 221		psp->boot_time_tmr = false;
 222		break;
 223	case IP_VERSION(11, 0, 8):
 224		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
 225			psp_v11_0_8_set_psp_funcs(psp);
 226		}
 227		psp->autoload_supported = false;
 228		psp->boot_time_tmr = false;
 229		break;
 230	case IP_VERSION(13, 0, 0):
 231	case IP_VERSION(13, 0, 7):
 232	case IP_VERSION(13, 0, 10):
 233		psp_v13_0_set_psp_funcs(psp);
 234		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
 235		psp->boot_time_tmr = false;
 236		break;
 237	case IP_VERSION(13, 0, 4):
 238		psp_v13_0_4_set_psp_funcs(psp);
 239		psp->boot_time_tmr = false;
 240		break;
 241	case IP_VERSION(14, 0, 2):
 242	case IP_VERSION(14, 0, 3):
 243		psp_v14_0_set_psp_funcs(psp);
 244		break;
 245	default:
 246		return -EINVAL;
 247	}
 248
 249	psp->adev = adev;
 250
 251	adev->psp_timeout = 20000;
 252
 253	psp_check_pmfw_centralized_cstate_management(psp);
 254
 255	if (amdgpu_sriov_vf(adev))
 256		return psp_init_sriov_microcode(psp);
 257	else
 258		return psp_init_microcode(psp);
 259}
 260
 261void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
 262{
 263	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
 264			      &mem_ctx->shared_buf);
 265	mem_ctx->shared_bo = NULL;
 266}
 267
 268static void psp_free_shared_bufs(struct psp_context *psp)
 269{
 270	void *tmr_buf;
 271	void **pptr;
 272
 273	/* free TMR memory buffer */
 274	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
 275	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 276	psp->tmr_bo = NULL;
 277
 278	/* free xgmi shared memory */
 279	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
 280
 281	/* free ras shared memory */
 282	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
 283
 284	/* free hdcp shared memory */
 285	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
 286
 287	/* free dtm shared memory */
 288	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
 289
 290	/* free rap shared memory */
 291	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
 292
 293	/* free securedisplay shared memory */
 294	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
 295
 296
 297}
 298
 299static void psp_memory_training_fini(struct psp_context *psp)
 300{
 301	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 302
 303	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
 304	kfree(ctx->sys_cache);
 305	ctx->sys_cache = NULL;
 306}
 307
 308static int psp_memory_training_init(struct psp_context *psp)
 309{
 310	int ret;
 311	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 312
 313	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
 314		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
 315		return 0;
 316	}
 317
 318	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
 319	if (ctx->sys_cache == NULL) {
 320		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
 321		ret = -ENOMEM;
 322		goto Err_out;
 323	}
 324
 325	dev_dbg(psp->adev->dev,
 326		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
 327		ctx->train_data_size,
 328		ctx->p2c_train_data_offset,
 329		ctx->c2p_train_data_offset);
 330	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
 331	return 0;
 332
 333Err_out:
 334	psp_memory_training_fini(psp);
 335	return ret;
 336}
 337
 338/*
  339 * Helper function to query psp runtime database entry
 340 *
 341 * @adev: amdgpu_device pointer
 342 * @entry_type: the type of psp runtime database entry
 343 * @db_entry: runtime database entry pointer
 344 *
  345 * Return false if the runtime database doesn't exist or the entry is invalid,
  346 * or true if the specific database entry is found and copied to @db_entry
 347 */
 348static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
 349				     enum psp_runtime_entry_type entry_type,
 350				     void *db_entry)
 351{
 352	uint64_t db_header_pos, db_dir_pos;
 353	struct psp_runtime_data_header db_header = {0};
 354	struct psp_runtime_data_directory db_dir = {0};
 355	bool ret = false;
 356	int i;
 357
 358	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
 359		return false;
 360
 361	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
 362	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
 363
 364	/* read runtime db header from vram */
 365	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
 366			sizeof(struct psp_runtime_data_header), false);
 367
 368	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
 369		/* runtime db doesn't exist, exit */
 370		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
 371		return false;
 372	}
 373
 374	/* read runtime database entry from vram */
 375	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
 376			sizeof(struct psp_runtime_data_directory), false);
 377
 378	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
 379		/* invalid db entry count, exit */
 380		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
 381		return false;
 382	}
 383
 384	/* look up for requested entry type */
 385	for (i = 0; i < db_dir.entry_count && !ret; i++) {
 386		if (db_dir.entry_list[i].entry_type == entry_type) {
 387			switch (entry_type) {
 388			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
 389				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
 390					/* invalid db entry size */
 391					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
 392					return false;
 393				}
 394				/* read runtime database entry */
 395				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
 396							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
 397				ret = true;
 398				break;
 399			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
 400				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
 401					/* invalid db entry size */
 402					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
 403					return false;
 404				}
 405				/* read runtime database entry */
 406				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
 407							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
 408				ret = true;
 409				break;
 410			default:
 411				ret = false;
 412				break;
 413			}
 414		}
 415	}
 416
 417	return ret;
 418}
 419
 420static int psp_sw_init(void *handle)
 421{
 422	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 423	struct psp_context *psp = &adev->psp;
 424	int ret;
 425	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
 426	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
 427	struct psp_runtime_scpm_entry scpm_entry;
 428
 429	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 430	if (!psp->cmd) {
 431		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
 432		ret = -ENOMEM;
 433	}
 434
 435	adev->psp.xgmi_context.supports_extended_data =
 436		!adev->gmc.xgmi.connected_to_cpu &&
 437		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
 438
 439	memset(&scpm_entry, 0, sizeof(scpm_entry));
 440	if ((psp_get_runtime_db_entry(adev,
 441				PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
 442				&scpm_entry)) &&
 443	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
 444		adev->scpm_enabled = true;
 445		adev->scpm_status = scpm_entry.scpm_status;
 446	} else {
 447		adev->scpm_enabled = false;
 448		adev->scpm_status = SCPM_DISABLE;
 449	}
 450
 451	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
 452
 453	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
 454	if (psp_get_runtime_db_entry(adev,
 455				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
 456				&boot_cfg_entry)) {
 457		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
 458		if ((psp->boot_cfg_bitmask) &
 459		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
 460			/* If psp runtime database exists, then
 461			 * only enable two stage memory training
 462			 * when TWO_STAGE_DRAM_TRAINING bit is set
 463			 * in runtime database
 464			 */
 465			mem_training_ctx->enable_mem_training = true;
 466		}
 467
 468	} else {
 469		/* If psp runtime database doesn't exist or is
 470		 * invalid, force enable two stage memory training
 471		 */
 472		mem_training_ctx->enable_mem_training = true;
 473	}
 474
 475	if (mem_training_ctx->enable_mem_training) {
 476		ret = psp_memory_training_init(psp);
 477		if (ret) {
 478			dev_err(adev->dev, "Failed to initialize memory training!\n");
 479			return ret;
 480		}
 481
 482		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
 483		if (ret) {
 484			dev_err(adev->dev, "Failed to process memory training!\n");
 485			return ret;
 486		}
 487	}
 488
 489	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
 490				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
 491				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 492				      &psp->fw_pri_bo,
 493				      &psp->fw_pri_mc_addr,
 494				      &psp->fw_pri_buf);
 495	if (ret)
 496		return ret;
 497
 498	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
 499				      AMDGPU_GEM_DOMAIN_VRAM |
 500				      AMDGPU_GEM_DOMAIN_GTT,
 501				      &psp->fence_buf_bo,
 502				      &psp->fence_buf_mc_addr,
 503				      &psp->fence_buf);
 504	if (ret)
 505		goto failed1;
 506
 507	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
 508				      AMDGPU_GEM_DOMAIN_VRAM |
 509				      AMDGPU_GEM_DOMAIN_GTT,
 510				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
 511				      (void **)&psp->cmd_buf_mem);
 512	if (ret)
 513		goto failed2;
 514
 515	return 0;
 516
 517failed2:
 518	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 519			      &psp->fence_buf_mc_addr, &psp->fence_buf);
 520failed1:
 521	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 522			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 523	return ret;
 524}
 525
 526static int psp_sw_fini(void *handle)
 527{
 528	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 529	struct psp_context *psp = &adev->psp;
 530	struct psp_gfx_cmd_resp *cmd = psp->cmd;
 531
 532	psp_memory_training_fini(psp);
 533
 534	amdgpu_ucode_release(&psp->sos_fw);
 535	amdgpu_ucode_release(&psp->asd_fw);
 536	amdgpu_ucode_release(&psp->ta_fw);
 537	amdgpu_ucode_release(&psp->cap_fw);
 538	amdgpu_ucode_release(&psp->toc_fw);
 539
 540	kfree(cmd);
 541	cmd = NULL;
 542
  543	psp_free_shared_bufs(psp);
 544
 545	if (psp->km_ring.ring_mem)
 546		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
 547				      &psp->km_ring.ring_mem_mc_addr,
 548				      (void **)&psp->km_ring.ring_mem);
 549
 550	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 551			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 552	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 553			      &psp->fence_buf_mc_addr, &psp->fence_buf);
 554	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
 555			      (void **)&psp->cmd_buf_mem);
 556
 557	return 0;
 558}
 559
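/* Poll register @reg_index once per microsecond until it either differs from
 * @reg_val (when @check_changed is set) or matches @reg_val under @mask;
 * returns 0 on success or -ETIME after adev->usec_timeout iterations.
 */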
 560int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 561		 uint32_t reg_val, uint32_t mask, bool check_changed)
 562{
 563	uint32_t val;
 564	int i;
 565	struct amdgpu_device *adev = psp->adev;
 566
 567	if (psp->adev->no_hw_access)
 568		return 0;
 569
 570	for (i = 0; i < adev->usec_timeout; i++) {
 571		val = RREG32(reg_index);
 572		if (check_changed) {
 573			if (val != reg_val)
 574				return 0;
 575		} else {
 576			if ((val & mask) == reg_val)
 577				return 0;
 578		}
 579		udelay(1);
 580	}
 581
 582	return -ETIME;
 583}
 584
 585int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
 586			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
 587{
 588	uint32_t val;
 589	int i;
 590	struct amdgpu_device *adev = psp->adev;
 591
 592	if (psp->adev->no_hw_access)
 593		return 0;
 594
 595	for (i = 0; i < msec_timeout; i++) {
 596		val = RREG32(reg_index);
 597		if ((val & mask) == reg_val)
 598			return 0;
 599		msleep(1);
 600	}
 601
 602	return -ETIME;
 603}
 604
 605static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
 606{
 607	switch (cmd_id) {
 608	case GFX_CMD_ID_LOAD_TA:
 609		return "LOAD_TA";
 610	case GFX_CMD_ID_UNLOAD_TA:
 611		return "UNLOAD_TA";
 612	case GFX_CMD_ID_INVOKE_CMD:
 613		return "INVOKE_CMD";
 614	case GFX_CMD_ID_LOAD_ASD:
 615		return "LOAD_ASD";
 616	case GFX_CMD_ID_SETUP_TMR:
 617		return "SETUP_TMR";
 618	case GFX_CMD_ID_LOAD_IP_FW:
 619		return "LOAD_IP_FW";
 620	case GFX_CMD_ID_DESTROY_TMR:
 621		return "DESTROY_TMR";
 622	case GFX_CMD_ID_SAVE_RESTORE:
 623		return "SAVE_RESTORE_IP_FW";
 624	case GFX_CMD_ID_SETUP_VMR:
 625		return "SETUP_VMR";
 626	case GFX_CMD_ID_DESTROY_VMR:
 627		return "DESTROY_VMR";
 628	case GFX_CMD_ID_PROG_REG:
 629		return "PROG_REG";
 630	case GFX_CMD_ID_GET_FW_ATTESTATION:
 631		return "GET_FW_ATTESTATION";
 632	case GFX_CMD_ID_LOAD_TOC:
 633		return "ID_LOAD_TOC";
 634	case GFX_CMD_ID_AUTOLOAD_RLC:
 635		return "AUTOLOAD_RLC";
 636	case GFX_CMD_ID_BOOT_CFG:
 637		return "BOOT_CFG";
 638	default:
 639		return "UNKNOWN CMD";
 640	}
 641}
 642
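/* Copy @cmd into the shared command buffer, submit it on the KM ring tagged
 * with a new fence value, then poll the fence buffer (with HDP invalidation)
 * until the PSP writes that value back, the timeout expires or a RAS
 * interrupt fires; the PSP response is copied back into @cmd->resp.
 */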
 643static int
 644psp_cmd_submit_buf(struct psp_context *psp,
 645		   struct amdgpu_firmware_info *ucode,
 646		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 647{
 648	int ret;
 649	int index;
 650	int timeout = psp->adev->psp_timeout;
 651	bool ras_intr = false;
 652	bool skip_unsupport = false;
 653
 654	if (psp->adev->no_hw_access)
 655		return 0;
 656
 657	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 658
 659	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 660
 661	index = atomic_inc_return(&psp->fence_value);
 662	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
 663	if (ret) {
 664		atomic_dec(&psp->fence_value);
  665		goto exit;
 666	}
 667
 668	amdgpu_device_invalidate_hdp(psp->adev, NULL);
 669	while (*((unsigned int *)psp->fence_buf) != index) {
 670		if (--timeout == 0)
 671			break;
 672		/*
  673		 * Shouldn't wait for the timeout when err_event_athub occurs,
  674		 * because the gpu reset thread has been triggered and the lock
  675		 * resource should be released for the psp resume sequence.
 676		 */
 677		ras_intr = amdgpu_ras_intr_triggered();
 678		if (ras_intr)
 679			break;
 680		usleep_range(10, 100);
 681		amdgpu_device_invalidate_hdp(psp->adev, NULL);
 682	}
 683
 684	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
 685	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
 686		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
 687
 688	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
 689
  690	/* In some cases, the psp response status is not 0 even when there is no
  691	 * problem with the submitted command. Some versions of PSP FW
  692	 * don't write 0 to that field.
  693	 * So here we only print a warning instead of an error
  694	 * during psp initialization, to avoid breaking hw_init, and we don't
  695	 * return -EINVAL.
  696	 */
 697	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
 698		if (ucode)
 699			dev_warn(psp->adev->dev,
 700				 "failed to load ucode %s(0x%X) ",
 701				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
 702		dev_warn(psp->adev->dev,
 703			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
 704			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
 705			 psp->cmd_buf_mem->resp.status);
 706		/* If any firmware (including CAP) load fails under SRIOV, it should
 707		 * return failure to stop the VF from initializing.
 708		 * Also return failure in case of timeout
 709		 */
 710		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
 711			ret = -EINVAL;
 712			goto exit;
 713		}
 714	}
  715
 716	if (ucode) {
 717		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
 718		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
  719	}
 720
 721exit:
 722	return ret;
 723}
 724
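/* acquire_psp_cmd_buf()/release_psp_cmd_buf() bracket every use of the single
 * psp->cmd scratch command: psp->mutex serializes callers and the buffer is
 * zeroed on acquire.
 */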
 725static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
 726{
 727	struct psp_gfx_cmd_resp *cmd = psp->cmd;
 728
 729	mutex_lock(&psp->mutex);
 730
 731	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
 732
 733	return cmd;
 734}
 735
 736static void release_psp_cmd_buf(struct psp_context *psp)
 737{
 738	mutex_unlock(&psp->mutex);
 739}
 740
 741static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
 742				 struct psp_gfx_cmd_resp *cmd,
 743				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
 744{
 745	struct amdgpu_device *adev = psp->adev;
 746	uint32_t size = 0;
 747	uint64_t tmr_pa = 0;
 748
 749	if (tmr_bo) {
 750		size = amdgpu_bo_size(tmr_bo);
 751		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
 752	}
 753
 754	if (amdgpu_sriov_vf(psp->adev))
 755		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
 756	else
 757		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
 758	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
 759	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
 760	cmd->cmd.cmd_setup_tmr.buf_size = size;
 761	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
 762	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
 763	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
 764}
 765
 766static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 767				      uint64_t pri_buf_mc, uint32_t size)
 768{
 769	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
 770	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
 771	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
 772	cmd->cmd.cmd_load_toc.toc_size = size;
 773}
 774
  775/* Issue a LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
 776static int psp_load_toc(struct psp_context *psp,
 777			uint32_t *tmr_size)
 778{
 779	int ret;
  780	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
  781
 782	/* Copy toc to psp firmware private buffer */
  783	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
 784
 785	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
 786
 787	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 788				 psp->fence_buf_mc_addr);
 789	if (!ret)
 790		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
 791
 792	release_psp_cmd_buf(psp);
 793
 794	return ret;
 795}
 796
 797/* Set up Trusted Memory Region */
 798static int psp_tmr_init(struct psp_context *psp)
 799{
 800	int ret = 0;
 801	int tmr_size;
 802	void *tmr_buf;
 803	void **pptr;
 804
 805	/*
  806	 * According to the HW engineers, the TMR address should be "naturally
  807	 * aligned", i.e. the start address should be an integer multiple of the TMR size.
  808	 *
  809	 * Note: this memory needs to stay reserved until the driver
  810	 * is unloaded.
 811	 */
 812	tmr_size = PSP_TMR_SIZE(psp->adev);
 813
  814	/* For ASICs that support RLC autoload, psp will parse the toc
 815	 * and calculate the total size of TMR needed
 816	 */
 817	if (!amdgpu_sriov_vf(psp->adev) &&
 818	    psp->toc.start_addr &&
 819	    psp->toc.size_bytes &&
 820	    psp->fw_pri_buf) {
 821		ret = psp_load_toc(psp, &tmr_size);
 822		if (ret) {
 823			dev_err(psp->adev->dev, "Failed to load toc\n");
 824			return ret;
 825		}
 826	}
 827
 828	if (!psp->tmr_bo && !psp->boot_time_tmr) {
 829		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
 830		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
 831					      PSP_TMR_ALIGNMENT,
 832					      AMDGPU_HAS_VRAM(psp->adev) ?
 833					      AMDGPU_GEM_DOMAIN_VRAM :
 834					      AMDGPU_GEM_DOMAIN_GTT,
 835					      &psp->tmr_bo, &psp->tmr_mc_addr,
 836					      pptr);
  837	}
 838
 839	return ret;
 840}
 841
 842static bool psp_skip_tmr(struct psp_context *psp)
 843{
 844	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
 845	case IP_VERSION(11, 0, 9):
 846	case IP_VERSION(11, 0, 7):
 847	case IP_VERSION(13, 0, 2):
 848	case IP_VERSION(13, 0, 6):
 849	case IP_VERSION(13, 0, 10):
 850		return true;
 851	default:
 852		return false;
 853	}
 854}
 855
 856static int psp_tmr_load(struct psp_context *psp)
 857{
 858	int ret;
 859	struct psp_gfx_cmd_resp *cmd;
 860
 861	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
 862	 * Already set up by host driver.
 863	 */
 864	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
 865		return 0;
 866
  867	cmd = acquire_psp_cmd_buf(psp);
 868
 869	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
 870	if (psp->tmr_bo)
 871		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
 872			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
 873
 874	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 875				 psp->fence_buf_mc_addr);
 876
 877	release_psp_cmd_buf(psp);
 878
 879	return ret;
 880}
 881
 882static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
 883					struct psp_gfx_cmd_resp *cmd)
 884{
 885	if (amdgpu_sriov_vf(psp->adev))
 886		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
 887	else
 888		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
 889}
 890
 891static int psp_tmr_unload(struct psp_context *psp)
 892{
 893	int ret;
 894	struct psp_gfx_cmd_resp *cmd;
 895
 896	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
 897	 * as TMR is not loaded at all
 898	 */
 899	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
 900		return 0;
 901
 902	cmd = acquire_psp_cmd_buf(psp);
 903
 904	psp_prep_tmr_unload_cmd_buf(psp, cmd);
 905	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
 906
 907	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 908				 psp->fence_buf_mc_addr);
 909
 910	release_psp_cmd_buf(psp);
 911
 912	return ret;
 913}
 914
 915static int psp_tmr_terminate(struct psp_context *psp)
 916{
 917	return psp_tmr_unload(psp);
 918}
 919
 920int psp_get_fw_attestation_records_addr(struct psp_context *psp,
 921					uint64_t *output_ptr)
 922{
 923	int ret;
 924	struct psp_gfx_cmd_resp *cmd;
 925
 926	if (!output_ptr)
 927		return -EINVAL;
 928
 929	if (amdgpu_sriov_vf(psp->adev))
 930		return 0;
 931
 932	cmd = acquire_psp_cmd_buf(psp);
 933
 934	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
 935
 936	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 937				 psp->fence_buf_mc_addr);
 938
 939	if (!ret) {
 940		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
 941			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
 942	}
 943
 944	release_psp_cmd_buf(psp);
 945
 946	return ret;
 947}
 948
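/* Query the PSP boot config via GFX_CMD_ID_BOOT_CFG; only the GECC bit of the
 * returned bitmask is reported through @boot_cfg (1 = GECC enabled).
 */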
 949static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
 950{
 951	struct psp_context *psp = &adev->psp;
 952	struct psp_gfx_cmd_resp *cmd;
  953	int ret;
 954
 955	if (amdgpu_sriov_vf(adev))
 956		return 0;
 957
 958	cmd = acquire_psp_cmd_buf(psp);
 959
 960	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
 961	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
 962
 963	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 964	if (!ret) {
 965		*boot_cfg =
 966			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
 967	}
 968
  969	release_psp_cmd_buf(psp);
 970
 971	return ret;
 972}
 973
  974static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
 975{
 976	int ret;
 977	struct psp_context *psp = &adev->psp;
 978	struct psp_gfx_cmd_resp *cmd;
 979
 980	if (amdgpu_sriov_vf(adev))
 981		return 0;
 982
 983	cmd = acquire_psp_cmd_buf(psp);
 984
 985	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
 986	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
 987	cmd->cmd.boot_cfg.boot_config = boot_cfg;
 988	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
 989
 990	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 991
 992	release_psp_cmd_buf(psp);
 993
 994	return ret;
 995}
 996
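/* Copy the register-list image into the PSP private buffer and submit it as a
 * GFX_FW_TYPE_REG_LIST LOAD_IP_FW command; a no-op when no rl image is
 * present.
 */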
 997static int psp_rl_load(struct amdgpu_device *adev)
 998{
 999	int ret;
1000	struct psp_context *psp = &adev->psp;
1001	struct psp_gfx_cmd_resp *cmd;
1002
1003	if (!is_psp_fw_valid(psp->rl))
1004		return 0;
1005
1006	cmd = acquire_psp_cmd_buf(psp);
1007
1008	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1009	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1010
1011	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1012	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1013	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1014	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1015	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1016
1017	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1018
1019	release_psp_cmd_buf(psp);
1020
1021	return ret;
1022}
1023
1024int psp_spatial_partition(struct psp_context *psp, int mode)
1025{
1026	struct psp_gfx_cmd_resp *cmd;
1027	int ret;
1028
1029	if (amdgpu_sriov_vf(psp->adev))
1030		return 0;
1031
1032	cmd = acquire_psp_cmd_buf(psp);
1033
1034	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1035	cmd->cmd.cmd_spatial_part.mode = mode;
1036
1037	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1038	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1039
1040	release_psp_cmd_buf(psp);
1041
1042	return ret;
1043}
1044
1045static int psp_asd_initialize(struct psp_context *psp)
1046{
1047	int ret;
1048
 1049	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
 1050	 * Add a workaround to bypass it for SRIOV for now.
1051	 * TODO: add version check to make it common
1052	 */
1053	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1054		return 0;
1055
1056	psp->asd_context.mem_context.shared_mc_addr  = 0;
1057	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1058	psp->asd_context.ta_load_type                = GFX_CMD_ID_LOAD_ASD;
1059
1060	ret = psp_ta_load(psp, &psp->asd_context);
1061	if (!ret)
 1062		psp->asd_context.initialized = true;
1063
1064	return ret;
1065}
1066
1067static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1068				       uint32_t session_id)
1069{
1070	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1071	cmd->cmd.cmd_unload_ta.session_id = session_id;
1072}
1073
1074int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1075{
1076	int ret;
1077	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1078
1079	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1080
1081	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1082
1083	context->resp_status = cmd->resp.status;
1084
1085	release_psp_cmd_buf(psp);
1086
1087	return ret;
1088}
1089
1090static int psp_asd_terminate(struct psp_context *psp)
1091{
 1092	int ret;
1093
1094	if (amdgpu_sriov_vf(psp->adev))
1095		return 0;
1096
1097	if (!psp->asd_context.initialized)
1098		return 0;
1099
 1100	ret = psp_ta_unload(psp, &psp->asd_context);
1101	if (!ret)
 1102		psp->asd_context.initialized = false;
1103
1104	return ret;
1105}
1106
1107static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1108		uint32_t id, uint32_t value)
1109{
1110	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1111	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1112	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1113}
1114
1115int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1116		uint32_t value)
1117{
1118	struct psp_gfx_cmd_resp *cmd;
1119	int ret = 0;
1120
1121	if (reg >= PSP_REG_LAST)
1122		return -EINVAL;
1123
 1124	cmd = acquire_psp_cmd_buf(psp);
1125
1126	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1127	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1128	if (ret)
1129		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1130
1131	release_psp_cmd_buf(psp);
 1132
1133	return ret;
1134}
1135
1136static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1137				     uint64_t ta_bin_mc,
 1138				     struct ta_context *context)
1139{
1140	cmd->cmd_id				= context->ta_load_type;
1141	cmd->cmd.cmd_load_ta.app_phy_addr_lo	= lower_32_bits(ta_bin_mc);
1142	cmd->cmd.cmd_load_ta.app_phy_addr_hi	= upper_32_bits(ta_bin_mc);
1143	cmd->cmd.cmd_load_ta.app_len		= context->bin_desc.size_bytes;
1144
1145	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1146		lower_32_bits(context->mem_context.shared_mc_addr);
1147	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1148		upper_32_bits(context->mem_context.shared_mc_addr);
1149	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1150}
1151
1152int psp_ta_init_shared_buf(struct psp_context *psp,
1153				  struct ta_mem_context *mem_ctx)
 1154{
1155	/*
 1156	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
 1157	 * physical memory) for the TA <-> host shared memory
1158	 */
1159	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1160				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1161				      AMDGPU_GEM_DOMAIN_GTT,
1162				      &mem_ctx->shared_bo,
1163				      &mem_ctx->shared_mc_addr,
 1164				      &mem_ctx->shared_buf);
1165}
1166
1167static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1168				       uint32_t ta_cmd_id,
1169				       uint32_t session_id)
1170{
1171	cmd->cmd_id				= GFX_CMD_ID_INVOKE_CMD;
1172	cmd->cmd.cmd_invoke_cmd.session_id	= session_id;
1173	cmd->cmd.cmd_invoke_cmd.ta_cmd_id	= ta_cmd_id;
1174}
1175
1176int psp_ta_invoke(struct psp_context *psp,
1177		  uint32_t ta_cmd_id,
1178		  struct ta_context *context)
1179{
1180	int ret;
 1181	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1182
1183	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1184
1185	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1186				 psp->fence_buf_mc_addr);
1187
1188	context->resp_status = cmd->resp.status;
1189
1190	release_psp_cmd_buf(psp);
1191
1192	return ret;
1193}
1194
1195int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1196{
1197	int ret;
1198	struct psp_gfx_cmd_resp *cmd;
1199
 1200	cmd = acquire_psp_cmd_buf(psp);
1201
1202	psp_copy_fw(psp, context->bin_desc.start_addr,
 1203		    context->bin_desc.size_bytes);
1204
 1205	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1206
1207	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1208				 psp->fence_buf_mc_addr);
1209
 1210	context->resp_status = cmd->resp.status;
1211
1212	if (!ret)
 1213		context->session_id = cmd->resp.session_id;
1214
 1215	release_psp_cmd_buf(psp);
1216
1217	return ret;
1218}
1219
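/* Illustrative TA lifecycle built from the helpers above (see
 * psp_xgmi_initialize()/psp_xgmi_terminate() below for a concrete user):
 *
 *	context->mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
 *	context->ta_load_type = GFX_CMD_ID_LOAD_TA;
 *	psp_ta_init_shared_buf(psp, &context->mem_context);
 *	psp_ta_load(psp, context);              // fills context->session_id
 *	psp_ta_invoke(psp, ta_cmd_id, context);
 *	psp_ta_unload(psp, context);
 *	psp_ta_free_shared_buf(&context->mem_context);
 */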
1220int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1221{
1222	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1223}
1224
1225int psp_xgmi_terminate(struct psp_context *psp)
1226{
1227	int ret;
1228	struct amdgpu_device *adev = psp->adev;
1229
1230	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1231	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1232	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1233	     adev->gmc.xgmi.connected_to_cpu))
1234		return 0;
1235
1236	if (!psp->xgmi_context.context.initialized)
 1237		return 0;
1238
1239	ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1240
 1241	psp->xgmi_context.context.initialized = false;
1242
1243	return ret;
1244}
1245
1246int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1247{
1248	struct ta_xgmi_shared_memory *xgmi_cmd;
1249	int ret;
1250
1251	if (!psp->ta_fw ||
1252	    !psp->xgmi_context.context.bin_desc.size_bytes ||
1253	    !psp->xgmi_context.context.bin_desc.start_addr)
1254		return -ENOENT;
1255
1256	if (!load_ta)
1257		goto invoke;
1258
1259	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1260	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1261
1262	if (!psp->xgmi_context.context.mem_context.shared_buf) {
1263		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1264		if (ret)
1265			return ret;
1266	}
1267
1268	/* Load XGMI TA */
1269	ret = psp_ta_load(psp, &psp->xgmi_context.context);
1270	if (!ret)
1271		psp->xgmi_context.context.initialized = true;
1272	else
1273		return ret;
1274
1275invoke:
1276	/* Initialize XGMI session */
1277	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1278	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1279	xgmi_cmd->flag_extend_link_record = set_extended_data;
1280	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1281
1282	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 1283	/* note down the capability flag for XGMI TA */
1284	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1285
1286	return ret;
1287}
1288
1289int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1290{
1291	struct ta_xgmi_shared_memory *xgmi_cmd;
1292	int ret;
1293
1294	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1295	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1296
1297	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1298
1299	/* Invoke xgmi ta to get hive id */
1300	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1301	if (ret)
1302		return ret;
1303
1304	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1305
1306	return 0;
1307}
1308
1309int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1310{
1311	struct ta_xgmi_shared_memory *xgmi_cmd;
1312	int ret;
1313
1314	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1315	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1316
1317	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1318
1319	/* Invoke xgmi ta to get the node id */
1320	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1321	if (ret)
1322		return ret;
1323
1324	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1325
1326	return 0;
1327}
1328
1329static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1330{
1331	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1332			IP_VERSION(13, 0, 2) &&
1333		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1334	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1335		       IP_VERSION(13, 0, 6);
1336}
1337
1338/*
1339 * Chips that support extended topology information require the driver to
1340 * reflect topology information in the opposite direction.  This is
1341 * because the TA has already exceeded its link record limit and if the
1342 * TA holds bi-directional information, the driver would have to do
1343 * multiple fetches instead of just two.
1344 */
1345static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1346					struct psp_xgmi_node_info node_info)
1347{
1348	struct amdgpu_device *mirror_adev;
1349	struct amdgpu_hive_info *hive;
1350	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1351	uint64_t dst_node_id = node_info.node_id;
1352	uint8_t dst_num_hops = node_info.num_hops;
1353	uint8_t dst_num_links = node_info.num_links;
1354
1355	hive = amdgpu_get_xgmi_hive(psp->adev);
1356	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1357		struct psp_xgmi_topology_info *mirror_top_info;
1358		int j;
1359
1360		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1361			continue;
1362
1363		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1364		for (j = 0; j < mirror_top_info->num_nodes; j++) {
1365			if (mirror_top_info->nodes[j].node_id != src_node_id)
1366				continue;
1367
1368			mirror_top_info->nodes[j].num_hops = dst_num_hops;
1369			/*
1370			 * prevent 0 num_links value re-reflection since reflection
1371			 * criteria is based on num_hops (direct or indirect).
1372			 *
1373			 */
1374			if (dst_num_links)
1375				mirror_top_info->nodes[j].num_links = dst_num_links;
1376
1377			break;
1378		}
1379
1380		break;
1381	}
1382
1383	amdgpu_put_xgmi_hive(hive);
1384}
1385
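/* Query node/hop topology from the XGMI TA and, when peer link info is
 * supported, issue a second GET_PEER_LINKS/GET_EXTEND_PEER_LINKS invocation to
 * fill in per-node link counts (and port numbers), reflecting the result into
 * peer devices via psp_xgmi_reflect_topology_info() where required.
 */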
1386int psp_xgmi_get_topology_info(struct psp_context *psp,
1387			       int number_devices,
1388			       struct psp_xgmi_topology_info *topology,
1389			       bool get_extended_data)
1390{
1391	struct ta_xgmi_shared_memory *xgmi_cmd;
1392	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1393	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1394	int i;
1395	int ret;
1396
1397	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1398		return -EINVAL;
1399
1400	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1401	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1402	xgmi_cmd->flag_extend_link_record = get_extended_data;
1403
1404	/* Fill in the shared memory with topology information as input */
1405	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1406	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1407	topology_info_input->num_nodes = number_devices;
1408
1409	for (i = 0; i < topology_info_input->num_nodes; i++) {
1410		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1411		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1412		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1413		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1414	}
1415
1416	/* Invoke xgmi ta to get the topology information */
1417	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1418	if (ret)
1419		return ret;
1420
1421	/* Read the output topology information from the shared memory */
1422	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1423	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1424	for (i = 0; i < topology->num_nodes; i++) {
1425		/* extended data will either be 0 or equal to non-extended data */
1426		if (topology_info_output->nodes[i].num_hops)
1427			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1428
1429		/* non-extended data gets everything here so no need to update */
1430		if (!get_extended_data) {
1431			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1432			topology->nodes[i].is_sharing_enabled =
1433					topology_info_output->nodes[i].is_sharing_enabled;
1434			topology->nodes[i].sdma_engine =
1435					topology_info_output->nodes[i].sdma_engine;
1436		}
1437
1438	}
1439
1440	/* Invoke xgmi ta again to get the link information */
1441	if (psp_xgmi_peer_link_info_supported(psp)) {
1442		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1443		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1444		bool requires_reflection =
1445			(psp->xgmi_context.supports_extended_data &&
1446			 get_extended_data) ||
1447			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1448				IP_VERSION(13, 0, 6);
1449		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1450				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1451
 1452		/* populate the shared output buffer rather than the cmd input buffer
1453		 * with node_ids as the input for GET_PEER_LINKS command execution.
1454		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1455		 * The same requirement for GET_EXTEND_PEER_LINKS command.
1456		 */
1457		if (ta_port_num_support) {
1458			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1459
1460			for (i = 0; i < topology->num_nodes; i++)
1461				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1462
1463			link_extend_info_output->num_nodes = topology->num_nodes;
1464			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1465		} else {
1466			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1467
1468			for (i = 0; i < topology->num_nodes; i++)
1469				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1470
1471			link_info_output->num_nodes = topology->num_nodes;
1472			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1473		}
1474
1475		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1476		if (ret)
1477			return ret;
1478
1479		for (i = 0; i < topology->num_nodes; i++) {
1480			uint8_t node_num_links = ta_port_num_support ?
1481				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1482			/* accumulate num_links on extended data */
1483			if (get_extended_data) {
1484				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1485			} else {
1486				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1487								topology->nodes[i].num_links : node_num_links;
1488			}
 1489			/* populate the connected port num info if supported and available */
1490			if (ta_port_num_support && topology->nodes[i].num_links) {
1491				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1492				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1493			}
1494
1495			/* reflect the topology information for bi-directionality */
1496			if (requires_reflection && topology->nodes[i].num_hops)
1497				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1498		}
1499	}
1500
1501	return 0;
1502}
1503
1504int psp_xgmi_set_topology_info(struct psp_context *psp,
1505			       int number_devices,
1506			       struct psp_xgmi_topology_info *topology)
1507{
1508	struct ta_xgmi_shared_memory *xgmi_cmd;
1509	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1510	int i;
1511
1512	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1513		return -EINVAL;
1514
1515	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1516	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1517
1518	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1519	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1520	topology_info_input->num_nodes = number_devices;
1521
1522	for (i = 0; i < topology_info_input->num_nodes; i++) {
1523		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1524		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1525		topology_info_input->nodes[i].is_sharing_enabled = 1;
1526		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1527	}
1528
1529	/* Invoke xgmi ta to set topology information */
1530	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1531}
1532
1533// ras begin
1534static void psp_ras_ta_check_status(struct psp_context *psp)
1535{
1536	struct ta_ras_shared_memory *ras_cmd =
1537		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1538
1539	switch (ras_cmd->ras_status) {
1540	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1541		dev_warn(psp->adev->dev,
1542			 "RAS WARNING: cmd failed due to unsupported ip\n");
1543		break;
1544	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1545		dev_warn(psp->adev->dev,
1546			 "RAS WARNING: cmd failed due to unsupported error injection\n");
1547		break;
1548	case TA_RAS_STATUS__SUCCESS:
1549		break;
1550	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1551		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1552			dev_warn(psp->adev->dev,
1553				 "RAS WARNING: Inject error to critical region is not allowed\n");
1554		break;
1555	default:
1556		dev_warn(psp->adev->dev,
1557			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
 1558		break;
 1559	}
1560}
1561
1562int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1563{
1564	struct ta_ras_shared_memory *ras_cmd;
1565	int ret;
1566
1567	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1568
1569	/*
1570	 * TODO: bypass the loading in sriov for now
1571	 */
1572	if (amdgpu_sriov_vf(psp->adev))
1573		return 0;
1574
1575	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1576
1577	if (amdgpu_ras_intr_triggered())
1578		return ret;
1579
1580	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
 1581		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1582		return -EINVAL;
1583	}
1584
1585	if (!ret) {
1586		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1587			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1588
1589			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
 1590		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1591			dev_warn(psp->adev->dev,
1592				 "RAS internal register access blocked\n");
1593
1594		psp_ras_ta_check_status(psp);
1595	}
1596
1597	return ret;
1598}
1599
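/* RAS TA commands follow a common pattern: fill ras_cmd in the TA shared
 * buffer, set ras_cmd->cmd_id, call psp_ras_invoke(), then inspect
 * ras_cmd->ras_status / ras_out_message for the result.
 */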
1600int psp_ras_enable_features(struct psp_context *psp,
1601		union ta_ras_cmd_input *info, bool enable)
1602{
1603	struct ta_ras_shared_memory *ras_cmd;
1604	int ret;
1605
1606	if (!psp->ras_context.context.initialized)
1607		return -EINVAL;
1608
1609	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1610	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1611
1612	if (enable)
1613		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1614	else
1615		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1616
1617	ras_cmd->ras_in_message = *info;
1618
1619	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1620	if (ret)
1621		return -EINVAL;
1622
1623	return 0;
1624}
1625
1626int psp_ras_terminate(struct psp_context *psp)
1627{
1628	int ret;
1629
1630	/*
1631	 * TODO: bypass the terminate in sriov for now
1632	 */
1633	if (amdgpu_sriov_vf(psp->adev))
1634		return 0;
1635
1636	if (!psp->ras_context.context.initialized)
1637		return 0;
1638
 1639	ret = psp_ta_unload(psp, &psp->ras_context.context);
1640
1641	psp->ras_context.context.initialized = false;
1642
 1643	return ret;
1644}
1645
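/* Load the RAS TA: reconcile GECC enablement in the PSP boot config with the
 * amdgpu_ras_enable/amdgpu_ras_mask module parameters, allocate the shared
 * buffer, fill in the init flags (poison mode, dGPU mode, xcc mask, disabled
 * channel count) and load the TA binary.
 */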
1646int psp_ras_initialize(struct psp_context *psp)
1647{
1648	int ret;
1649	uint32_t boot_cfg = 0xFF;
1650	struct amdgpu_device *adev = psp->adev;
1651	struct ta_ras_shared_memory *ras_cmd;
1652
1653	/*
1654	 * TODO: bypass the initialize in sriov for now
1655	 */
1656	if (amdgpu_sriov_vf(adev))
1657		return 0;
1658
1659	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1660	    !adev->psp.ras_context.context.bin_desc.start_addr) {
1661		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1662		return 0;
1663	}
1664
1665	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1666		/* query GECC enablement status from boot config
1667		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1668		 */
1669		ret = psp_boot_config_get(adev, &boot_cfg);
1670		if (ret)
1671			dev_warn(adev->dev, "PSP get boot config failed\n");
1672
1673		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1674			if (!boot_cfg) {
1675				dev_info(adev->dev, "GECC is disabled\n");
1676			} else {
1677				/* disable GECC in next boot cycle if ras is
1678				 * disabled by module parameter amdgpu_ras_enable
1679				 * and/or amdgpu_ras_mask, or boot_config_get call
1680				 * is failed
1681				 */
1682				ret = psp_boot_config_set(adev, 0);
1683				if (ret)
1684					dev_warn(adev->dev, "PSP set boot config failed\n");
1685				else
1686					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1687			}
1688		} else {
1689			if (boot_cfg == 1) {
1690				dev_info(adev->dev, "GECC is enabled\n");
1691			} else {
1692				/* enable GECC in next boot cycle if it is disabled
1693				 * in boot config, or force enable GECC if failed to
1694				 * get boot configuration
1695				 */
1696				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1697				if (ret)
1698					dev_warn(adev->dev, "PSP set boot config failed\n");
1699				else
1700					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1701			}
1702		}
1703	}
1704
1705	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1706	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1707
1708	if (!psp->ras_context.context.mem_context.shared_buf) {
1709		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1710		if (ret)
1711			return ret;
1712	}
1713
1714	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1715	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1716
1717	if (amdgpu_ras_is_poison_mode_supported(adev))
1718		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1719	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1720		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1721	ras_cmd->ras_in_message.init_flags.xcc_mask =
1722		adev->gfx.xcc_mask;
1723	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1724
1725	ret = psp_ta_load(psp, &psp->ras_context.context);
1726
1727	if (!ret && !ras_cmd->ras_status)
1728		psp->ras_context.context.initialized = true;
1729	else {
1730		if (ras_cmd->ras_status)
1731			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1732
1733		/* fail to load RAS TA */
1734		psp->ras_context.context.initialized = false;
1735	}
1736
1737	return ret;
1738}
1739
1740int psp_ras_trigger_error(struct psp_context *psp,
1741			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1742{
1743	struct ta_ras_shared_memory *ras_cmd;
1744	struct amdgpu_device *adev = psp->adev;
1745	int ret;
1746	uint32_t dev_mask;
1747
1748	if (!psp->ras_context.context.initialized)
1749		return -EINVAL;
1750
1751	switch (info->block_id) {
1752	case TA_RAS_BLOCK__GFX:
1753		dev_mask = GET_MASK(GC, instance_mask);
1754		break;
1755	case TA_RAS_BLOCK__SDMA:
1756		dev_mask = GET_MASK(SDMA0, instance_mask);
1757		break;
1758	case TA_RAS_BLOCK__VCN:
1759	case TA_RAS_BLOCK__JPEG:
1760		dev_mask = GET_MASK(VCN, instance_mask);
1761		break;
1762	default:
1763		dev_mask = instance_mask;
1764		break;
1765	}
1766
1767	/* reuse sub_block_index for backward compatibility */
1768	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1769	dev_mask &= AMDGPU_RAS_INST_MASK;
1770	info->sub_block_index |= dev_mask;
1771
1772	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1773	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1774
1775	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1776	ras_cmd->ras_in_message.trigger_error = *info;
1777
1778	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1779	if (ret)
1780		return -EINVAL;
1781
1782	/* If err_event_athub occurs, error injection was successful; however,
1783	 * the return status from the TA is no longer reliable
1784	 */
1785	if (amdgpu_ras_intr_triggered())
1786		return 0;
1787
1788	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1789		return -EACCES;
1790	else if (ras_cmd->ras_status)
1791		return -EINVAL;
1792
1793	return 0;
1794}
1795
1796int psp_ras_query_address(struct psp_context *psp,
1797			  struct ta_ras_query_address_input *addr_in,
1798			  struct ta_ras_query_address_output *addr_out)
1799{
1800	struct ta_ras_shared_memory *ras_cmd;
1801	int ret;
1802
1803	if (!psp->ras_context.context.initialized)
1804		return -EINVAL;
1805
1806	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1807	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1808
1809	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
1810	ras_cmd->ras_in_message.address = *addr_in;
1811
1812	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1813	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1814		return -EINVAL;
1815
1816	*addr_out = ras_cmd->ras_out_message.address;
1817
1818	return 0;
1819}
1820// ras end
1821
1822// HDCP start
1823static int psp_hdcp_initialize(struct psp_context *psp)
1824{
1825	int ret;
1826
1827	/*
1828	 * TODO: bypass the initialize in sriov for now
1829	 */
1830	if (amdgpu_sriov_vf(psp->adev))
1831		return 0;
1832
1833	/* bypass hdcp initialization if dmu is harvested */
1834	if (!amdgpu_device_has_display_hardware(psp->adev))
1835		return 0;
1836
1837	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1838	    !psp->hdcp_context.context.bin_desc.start_addr) {
1839		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1840		return 0;
1841	}
1842
1843	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1844	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1845
1846	if (!psp->hdcp_context.context.mem_context.shared_buf) {
1847		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1848		if (ret)
1849			return ret;
1850	}
1851
1852	ret = psp_ta_load(psp, &psp->hdcp_context.context);
1853	if (!ret) {
1854		psp->hdcp_context.context.initialized = true;
1855		mutex_init(&psp->hdcp_context.mutex);
1856	}
1857
1858	return ret;
1859}
1860
1861int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1862{
1863	/*
1864	 * TODO: bypass the loading in sriov for now
1865	 */
1866	if (amdgpu_sriov_vf(psp->adev))
1867		return 0;
1868
1869	if (!psp->hdcp_context.context.initialized)
1870		return 0;
1871
1872	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1873}
1874
1875static int psp_hdcp_terminate(struct psp_context *psp)
1876{
1877	int ret;
1878
1879	/*
1880	 * TODO: bypass the terminate in sriov for now
1881	 */
1882	if (amdgpu_sriov_vf(psp->adev))
1883		return 0;
1884
1885	if (!psp->hdcp_context.context.initialized)
1886		return 0;
1887
1888	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1889
1890	psp->hdcp_context.context.initialized = false;
1891
1892	return ret;
1893}
1894// HDCP end
1895
1896// DTM start
1897static int psp_dtm_initialize(struct psp_context *psp)
1898{
1899	int ret;
1900
1901	/*
1902	 * TODO: bypass the initialize in sriov for now
1903	 */
1904	if (amdgpu_sriov_vf(psp->adev))
1905		return 0;
1906
1907	/* bypass dtm initialization if dmu is harvested */
1908	if (!amdgpu_device_has_display_hardware(psp->adev))
1909		return 0;
1910
1911	if (!psp->dtm_context.context.bin_desc.size_bytes ||
1912	    !psp->dtm_context.context.bin_desc.start_addr) {
1913		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1914		return 0;
1915	}
1916
1917	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1918	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1919
1920	if (!psp->dtm_context.context.mem_context.shared_buf) {
1921		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1922		if (ret)
1923			return ret;
1924	}
1925
1926	ret = psp_ta_load(psp, &psp->dtm_context.context);
1927	if (!ret) {
1928		psp->dtm_context.context.initialized = true;
1929		mutex_init(&psp->dtm_context.mutex);
1930	}
1931
1932	return ret;
1933}
1934
1935int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1936{
1937	/*
1938	 * TODO: bypass the loading in sriov for now
1939	 */
1940	if (amdgpu_sriov_vf(psp->adev))
1941		return 0;
1942
1943	if (!psp->dtm_context.context.initialized)
1944		return 0;
1945
1946	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1947}
1948
1949static int psp_dtm_terminate(struct psp_context *psp)
1950{
1951	int ret;
1952
1953	/*
1954	 * TODO: bypass the terminate in sriov for now
1955	 */
1956	if (amdgpu_sriov_vf(psp->adev))
1957		return 0;
1958
1959	if (!psp->dtm_context.context.initialized)
1960		return 0;
1961
1962	ret = psp_ta_unload(psp, &psp->dtm_context.context);
1963
1964	psp->dtm_context.context.initialized = false;
1965
1966	return ret;
1967}
1968// DTM end
1969
1970// RAP start
1971static int psp_rap_initialize(struct psp_context *psp)
1972{
1973	int ret;
1974	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1975
1976	/*
1977	 * TODO: bypass the initialize in sriov for now
1978	 */
1979	if (amdgpu_sriov_vf(psp->adev))
1980		return 0;
1981
1982	if (!psp->rap_context.context.bin_desc.size_bytes ||
1983	    !psp->rap_context.context.bin_desc.start_addr) {
1984		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1985		return 0;
1986	}
1987
1988	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1989	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1990
1991	if (!psp->rap_context.context.mem_context.shared_buf) {
1992		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1993		if (ret)
1994			return ret;
1995	}
1996
1997	ret = psp_ta_load(psp, &psp->rap_context.context);
1998	if (!ret) {
1999		psp->rap_context.context.initialized = true;
2000		mutex_init(&psp->rap_context.mutex);
2001	} else
2002		return ret;
2003
2004	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2005	if (ret || status != TA_RAP_STATUS__SUCCESS) {
2006		psp_rap_terminate(psp);
2007		/* free rap shared memory */
2008		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2009
2010		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2011			 ret, status);
2012
2013		return ret;
2014	}
2015
2016	return 0;
2017}
2018
2019static int psp_rap_terminate(struct psp_context *psp)
2020{
2021	int ret;
2022
2023	if (!psp->rap_context.context.initialized)
2024		return 0;
2025
2026	ret = psp_ta_unload(psp, &psp->rap_context.context);
2027
2028	psp->rap_context.context.initialized = false;
2029
2030	return ret;
2031}
2032
2033int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2034{
2035	struct ta_rap_shared_memory *rap_cmd;
2036	int ret = 0;
2037
2038	if (!psp->rap_context.context.initialized)
2039		return 0;
2040
2041	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2042	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2043		return -EINVAL;
2044
2045	mutex_lock(&psp->rap_context.mutex);
2046
2047	rap_cmd = (struct ta_rap_shared_memory *)
2048		  psp->rap_context.context.mem_context.shared_buf;
2049	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2050
2051	rap_cmd->cmd_id = ta_cmd_id;
2052	rap_cmd->validation_method_id = METHOD_A;
2053
2054	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2055	if (ret)
2056		goto out_unlock;
2057
2058	if (status)
2059		*status = rap_cmd->rap_status;
2060
2061out_unlock:
2062	mutex_unlock(&psp->rap_context.mutex);
2063
2064	return ret;
2065}
2066// RAP end
2067
2068/* securedisplay start */
2069static int psp_securedisplay_initialize(struct psp_context *psp)
2070{
2071	int ret;
2072	struct ta_securedisplay_cmd *securedisplay_cmd;
2073
2074	/*
2075	 * TODO: bypass the initialize in sriov for now
2076	 */
2077	if (amdgpu_sriov_vf(psp->adev))
2078		return 0;
2079
2080	/* bypass securedisplay initialization if dmu is harvested */
2081	if (!amdgpu_device_has_display_hardware(psp->adev))
2082		return 0;
2083
2084	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2085	    !psp->securedisplay_context.context.bin_desc.start_addr) {
2086		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2087		return 0;
2088	}
2089
2090	psp->securedisplay_context.context.mem_context.shared_mem_size =
2091		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2092	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2093
2094	if (!psp->securedisplay_context.context.initialized) {
2095		ret = psp_ta_init_shared_buf(psp,
2096					     &psp->securedisplay_context.context.mem_context);
2097		if (ret)
2098			return ret;
2099	}
2100
2101	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2102	if (!ret) {
2103		psp->securedisplay_context.context.initialized = true;
2104		mutex_init(&psp->securedisplay_context.mutex);
2105	} else
2106		return ret;
2107
2108	mutex_lock(&psp->securedisplay_context.mutex);
2109
2110	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2111			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2112
2113	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2114
2115	mutex_unlock(&psp->securedisplay_context.mutex);
2116
2117	if (ret) {
2118		psp_securedisplay_terminate(psp);
2119		/* free securedisplay shared memory */
2120		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2121		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2122		return -EINVAL;
2123	}
2124
2125	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2126		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2127		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2128			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2129		/* don't try again */
2130		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2131	}
2132
2133	return 0;
2134}
2135
2136static int psp_securedisplay_terminate(struct psp_context *psp)
2137{
2138	int ret;
2139
2140	/*
2141	 * TODO: bypass the terminate in sriov for now
2142	 */
2143	if (amdgpu_sriov_vf(psp->adev))
2144		return 0;
2145
2146	if (!psp->securedisplay_context.context.initialized)
2147		return 0;
2148
2149	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2150
2151	psp->securedisplay_context.context.initialized = false;
2152
2153	return ret;
2154}
2155
2156int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2157{
2158	int ret;
2159
2160	if (!psp->securedisplay_context.context.initialized)
2161		return -EINVAL;
2162
2163	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2164	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2165		return -EINVAL;
2166
2167	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2168
2169	return ret;
2170}
2171/* SECUREDISPLAY end */
2172
2173int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2174{
2175	struct psp_context *psp = &adev->psp;
2176	int ret = 0;
2177
2178	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2179		ret = psp->funcs->wait_for_bootloader(psp);
2180
2181	return ret;
2182}
2183
2184bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2185{
2186	if (psp->funcs &&
2187	    psp->funcs->get_ras_capability) {
2188		return psp->funcs->get_ras_capability(psp);
2189	} else {
2190		return false;
2191	}
2192}
2193
2194static int psp_hw_start(struct psp_context *psp)
2195{
2196	struct amdgpu_device *adev = psp->adev;
2197	int ret;
2198
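	/*
	 * On bare metal, stage the PSP bootloader components in order
	 * (KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV and
	 * finally SOS); each one is loaded only when both its firmware
	 * image and the corresponding ASIC callback are present.
	 */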
2199	if (!amdgpu_sriov_vf(adev)) {
2200		if ((is_psp_fw_valid(psp->kdb)) &&
2201		    (psp->funcs->bootloader_load_kdb != NULL)) {
2202			ret = psp_bootloader_load_kdb(psp);
2203			if (ret) {
2204				dev_err(adev->dev, "PSP load kdb failed!\n");
2205				return ret;
2206			}
2207		}
2208
2209		if ((is_psp_fw_valid(psp->spl)) &&
2210		    (psp->funcs->bootloader_load_spl != NULL)) {
2211			ret = psp_bootloader_load_spl(psp);
2212			if (ret) {
2213				dev_err(adev->dev, "PSP load spl failed!\n");
2214				return ret;
2215			}
2216		}
2217
2218		if ((is_psp_fw_valid(psp->sys)) &&
2219		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
2220			ret = psp_bootloader_load_sysdrv(psp);
2221			if (ret) {
2222				dev_err(adev->dev, "PSP load sys drv failed!\n");
2223				return ret;
2224			}
2225		}
2226
2227		if ((is_psp_fw_valid(psp->soc_drv)) &&
2228		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
2229			ret = psp_bootloader_load_soc_drv(psp);
2230			if (ret) {
2231				dev_err(adev->dev, "PSP load soc drv failed!\n");
2232				return ret;
2233			}
2234		}
2235
2236		if ((is_psp_fw_valid(psp->intf_drv)) &&
2237		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
2238			ret = psp_bootloader_load_intf_drv(psp);
2239			if (ret) {
2240				dev_err(adev->dev, "PSP load intf drv failed!\n");
2241				return ret;
2242			}
2243		}
2244
2245		if ((is_psp_fw_valid(psp->dbg_drv)) &&
2246		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2247			ret = psp_bootloader_load_dbg_drv(psp);
2248			if (ret) {
2249				dev_err(adev->dev, "PSP load dbg drv failed!\n");
2250				return ret;
2251			}
2252		}
2253
2254		if ((is_psp_fw_valid(psp->ras_drv)) &&
2255		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
2256			ret = psp_bootloader_load_ras_drv(psp);
2257			if (ret) {
2258				dev_err(adev->dev, "PSP load ras_drv failed!\n");
2259				return ret;
2260			}
2261		}
2262
2263		if ((is_psp_fw_valid(psp->sos)) &&
2264		    (psp->funcs->bootloader_load_sos != NULL)) {
2265			ret = psp_bootloader_load_sos(psp);
2266			if (ret) {
2267				dev_err(adev->dev, "PSP load sos failed!\n");
2268				return ret;
2269			}
2270		}
2271	}
2272
2273	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2274	if (ret) {
2275		dev_err(adev->dev, "PSP create ring failed!\n");
2276		return ret;
2277	}
2278
2279	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2280		goto skip_pin_bo;
2281
2282	if (!psp->boot_time_tmr || psp->autoload_supported) {
2283		ret = psp_tmr_init(psp);
2284		if (ret) {
2285			dev_err(adev->dev, "PSP tmr init failed!\n");
2286			return ret;
2287		}
2288	}
2289
2290skip_pin_bo:
2291	/*
2292	 * For ASICs with DF Cstate management centralized
2293	 * to PMFW, TMR setup should be performed after PMFW is
2294	 * loaded and before other non-psp firmware is loaded.
2295	 */
2296	if (psp->pmfw_centralized_cstate_management) {
2297		ret = psp_load_smu_fw(psp);
2298		if (ret)
2299			return ret;
2300	}
2301
2302	if (!psp->boot_time_tmr || !psp->autoload_supported) {
2303		ret = psp_tmr_load(psp);
2304		if (ret) {
2305			dev_err(adev->dev, "PSP load tmr failed!\n");
2306			return ret;
2307		}
2308	}
2309
2310	return 0;
2311}
2312
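/*
 * Map a driver ucode ID onto the PSP GFX firmware type carried in the
 * LOAD_IP_FW command; unknown IDs are rejected with -EINVAL.
 */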
2313static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2314			   enum psp_gfx_fw_type *type)
2315{
2316	switch (ucode->ucode_id) {
2317	case AMDGPU_UCODE_ID_CAP:
2318		*type = GFX_FW_TYPE_CAP;
2319		break;
2320	case AMDGPU_UCODE_ID_SDMA0:
2321		*type = GFX_FW_TYPE_SDMA0;
2322		break;
2323	case AMDGPU_UCODE_ID_SDMA1:
2324		*type = GFX_FW_TYPE_SDMA1;
2325		break;
2326	case AMDGPU_UCODE_ID_SDMA2:
2327		*type = GFX_FW_TYPE_SDMA2;
2328		break;
2329	case AMDGPU_UCODE_ID_SDMA3:
2330		*type = GFX_FW_TYPE_SDMA3;
2331		break;
2332	case AMDGPU_UCODE_ID_SDMA4:
2333		*type = GFX_FW_TYPE_SDMA4;
2334		break;
2335	case AMDGPU_UCODE_ID_SDMA5:
2336		*type = GFX_FW_TYPE_SDMA5;
2337		break;
2338	case AMDGPU_UCODE_ID_SDMA6:
2339		*type = GFX_FW_TYPE_SDMA6;
2340		break;
2341	case AMDGPU_UCODE_ID_SDMA7:
2342		*type = GFX_FW_TYPE_SDMA7;
2343		break;
2344	case AMDGPU_UCODE_ID_CP_MES:
2345		*type = GFX_FW_TYPE_CP_MES;
2346		break;
2347	case AMDGPU_UCODE_ID_CP_MES_DATA:
2348		*type = GFX_FW_TYPE_MES_STACK;
2349		break;
2350	case AMDGPU_UCODE_ID_CP_MES1:
2351		*type = GFX_FW_TYPE_CP_MES_KIQ;
2352		break;
2353	case AMDGPU_UCODE_ID_CP_MES1_DATA:
2354		*type = GFX_FW_TYPE_MES_KIQ_STACK;
2355		break;
2356	case AMDGPU_UCODE_ID_CP_CE:
2357		*type = GFX_FW_TYPE_CP_CE;
2358		break;
2359	case AMDGPU_UCODE_ID_CP_PFP:
2360		*type = GFX_FW_TYPE_CP_PFP;
2361		break;
2362	case AMDGPU_UCODE_ID_CP_ME:
2363		*type = GFX_FW_TYPE_CP_ME;
2364		break;
2365	case AMDGPU_UCODE_ID_CP_MEC1:
2366		*type = GFX_FW_TYPE_CP_MEC;
2367		break;
2368	case AMDGPU_UCODE_ID_CP_MEC1_JT:
2369		*type = GFX_FW_TYPE_CP_MEC_ME1;
2370		break;
2371	case AMDGPU_UCODE_ID_CP_MEC2:
2372		*type = GFX_FW_TYPE_CP_MEC;
2373		break;
2374	case AMDGPU_UCODE_ID_CP_MEC2_JT:
2375		*type = GFX_FW_TYPE_CP_MEC_ME2;
2376		break;
2377	case AMDGPU_UCODE_ID_RLC_P:
2378		*type = GFX_FW_TYPE_RLC_P;
2379		break;
2380	case AMDGPU_UCODE_ID_RLC_V:
2381		*type = GFX_FW_TYPE_RLC_V;
2382		break;
2383	case AMDGPU_UCODE_ID_RLC_G:
2384		*type = GFX_FW_TYPE_RLC_G;
2385		break;
2386	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2387		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2388		break;
2389	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2390		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2391		break;
2392	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2393		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2394		break;
2395	case AMDGPU_UCODE_ID_RLC_IRAM:
2396		*type = GFX_FW_TYPE_RLC_IRAM;
2397		break;
2398	case AMDGPU_UCODE_ID_RLC_DRAM:
2399		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2400		break;
2401	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2402		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2403		break;
2404	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2405		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2406		break;
2407	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2408		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2409		break;
2410	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2411		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2412		break;
2413	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2414		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2415		break;
2416	case AMDGPU_UCODE_ID_SMC:
2417		*type = GFX_FW_TYPE_SMU;
2418		break;
2419	case AMDGPU_UCODE_ID_PPTABLE:
2420		*type = GFX_FW_TYPE_PPTABLE;
2421		break;
2422	case AMDGPU_UCODE_ID_UVD:
2423		*type = GFX_FW_TYPE_UVD;
2424		break;
2425	case AMDGPU_UCODE_ID_UVD1:
2426		*type = GFX_FW_TYPE_UVD1;
2427		break;
2428	case AMDGPU_UCODE_ID_VCE:
2429		*type = GFX_FW_TYPE_VCE;
2430		break;
2431	case AMDGPU_UCODE_ID_VCN:
2432		*type = GFX_FW_TYPE_VCN;
2433		break;
2434	case AMDGPU_UCODE_ID_VCN1:
2435		*type = GFX_FW_TYPE_VCN1;
2436		break;
2437	case AMDGPU_UCODE_ID_DMCU_ERAM:
2438		*type = GFX_FW_TYPE_DMCU_ERAM;
2439		break;
2440	case AMDGPU_UCODE_ID_DMCU_INTV:
2441		*type = GFX_FW_TYPE_DMCU_ISR;
2442		break;
2443	case AMDGPU_UCODE_ID_VCN0_RAM:
2444		*type = GFX_FW_TYPE_VCN0_RAM;
2445		break;
2446	case AMDGPU_UCODE_ID_VCN1_RAM:
2447		*type = GFX_FW_TYPE_VCN1_RAM;
2448		break;
2449	case AMDGPU_UCODE_ID_DMCUB:
2450		*type = GFX_FW_TYPE_DMUB;
2451		break;
2452	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2453		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2454		break;
2455	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2456		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2457		break;
2458	case AMDGPU_UCODE_ID_IMU_I:
2459		*type = GFX_FW_TYPE_IMU_I;
2460		break;
2461	case AMDGPU_UCODE_ID_IMU_D:
2462		*type = GFX_FW_TYPE_IMU_D;
2463		break;
2464	case AMDGPU_UCODE_ID_CP_RS64_PFP:
2465		*type = GFX_FW_TYPE_RS64_PFP;
2466		break;
2467	case AMDGPU_UCODE_ID_CP_RS64_ME:
2468		*type = GFX_FW_TYPE_RS64_ME;
2469		break;
2470	case AMDGPU_UCODE_ID_CP_RS64_MEC:
2471		*type = GFX_FW_TYPE_RS64_MEC;
2472		break;
2473	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2474		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2475		break;
2476	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2477		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2478		break;
2479	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2480		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2481		break;
2482	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2483		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2484		break;
2485	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2486		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2487		break;
2488	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2489		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2490		break;
2491	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2492		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2493		break;
2494	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2495		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2496		break;
2497	case AMDGPU_UCODE_ID_VPE_CTX:
2498		*type = GFX_FW_TYPE_VPEC_FW1;
2499		break;
2500	case AMDGPU_UCODE_ID_VPE_CTL:
2501		*type = GFX_FW_TYPE_VPEC_FW2;
2502		break;
2503	case AMDGPU_UCODE_ID_VPE:
2504		*type = GFX_FW_TYPE_VPE;
2505		break;
2506	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2507		*type = GFX_FW_TYPE_UMSCH_UCODE;
2508		break;
2509	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2510		*type = GFX_FW_TYPE_UMSCH_DATA;
2511		break;
2512	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2513		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2514		break;
2515	case AMDGPU_UCODE_ID_P2S_TABLE:
2516		*type = GFX_FW_TYPE_P2S_TABLE;
2517		break;
2518	case AMDGPU_UCODE_ID_JPEG_RAM:
2519		*type = GFX_FW_TYPE_JPEG_RAM;
2520		break;
2521	case AMDGPU_UCODE_ID_MAXIMUM:
2522	default:
2523		return -EINVAL;
2524	}
2525
2526	return 0;
2527}
2528
2529static void psp_print_fw_hdr(struct psp_context *psp,
2530			     struct amdgpu_firmware_info *ucode)
2531{
2532	struct amdgpu_device *adev = psp->adev;
2533	struct common_firmware_header *hdr;
2534
2535	switch (ucode->ucode_id) {
2536	case AMDGPU_UCODE_ID_SDMA0:
2537	case AMDGPU_UCODE_ID_SDMA1:
2538	case AMDGPU_UCODE_ID_SDMA2:
2539	case AMDGPU_UCODE_ID_SDMA3:
2540	case AMDGPU_UCODE_ID_SDMA4:
2541	case AMDGPU_UCODE_ID_SDMA5:
2542	case AMDGPU_UCODE_ID_SDMA6:
2543	case AMDGPU_UCODE_ID_SDMA7:
2544		hdr = (struct common_firmware_header *)
2545			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2546		amdgpu_ucode_print_sdma_hdr(hdr);
2547		break;
2548	case AMDGPU_UCODE_ID_CP_CE:
2549		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2550		amdgpu_ucode_print_gfx_hdr(hdr);
2551		break;
2552	case AMDGPU_UCODE_ID_CP_PFP:
2553		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2554		amdgpu_ucode_print_gfx_hdr(hdr);
2555		break;
2556	case AMDGPU_UCODE_ID_CP_ME:
2557		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2558		amdgpu_ucode_print_gfx_hdr(hdr);
2559		break;
2560	case AMDGPU_UCODE_ID_CP_MEC1:
2561		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2562		amdgpu_ucode_print_gfx_hdr(hdr);
2563		break;
2564	case AMDGPU_UCODE_ID_RLC_G:
2565		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2566		amdgpu_ucode_print_rlc_hdr(hdr);
2567		break;
2568	case AMDGPU_UCODE_ID_SMC:
2569		hdr = (struct common_firmware_header *)adev->pm.fw->data;
2570		amdgpu_ucode_print_smc_hdr(hdr);
2571		break;
2572	default:
2573		break;
2574	}
2575}
2576
2577static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2578				       struct amdgpu_firmware_info *ucode,
2579				       struct psp_gfx_cmd_resp *cmd)
2580{
2581	int ret;
2582	uint64_t fw_mem_mc_addr = ucode->mc_addr;
2583
2584	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2585	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2586	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2587	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2588
2589	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2590	if (ret)
2591		dev_err(psp->adev->dev, "Unknown firmware type\n");
2592
2593	return ret;
2594}
2595
2596int psp_execute_ip_fw_load(struct psp_context *psp,
2597			   struct amdgpu_firmware_info *ucode)
2598{
2599	int ret = 0;
2600	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2601
2602	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2603	if (!ret) {
2604		ret = psp_cmd_submit_buf(psp, ucode, cmd,
2605					 psp->fence_buf_mc_addr);
2606	}
2607
2608	release_psp_cmd_buf(psp);
2609
2610	return ret;
2611}
2612
2613static int psp_load_p2s_table(struct psp_context *psp)
2614{
2615	int ret;
2616	struct amdgpu_device *adev = psp->adev;
2617	struct amdgpu_firmware_info *ucode =
2618		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2619
2620	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2621		return 0;
2622
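	/*
	 * On MP0 v13.0.6 the P2S table is only consumed by sufficiently
	 * new SOS firmware; the minimum version differs between APU and
	 * dGPU parts.
	 */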
2623	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2624		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2625								0x0036003C;
2626		if (psp->sos.fw_version < supp_vers)
2627			return 0;
2628	}
2629
2630	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2631		return 0;
2632
2633	ret = psp_execute_ip_fw_load(psp, ucode);
2634
2635	return ret;
2636}
2637
2638static int psp_load_smu_fw(struct psp_context *psp)
2639{
2640	int ret;
2641	struct amdgpu_device *adev = psp->adev;
2642	struct amdgpu_firmware_info *ucode =
2643			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2644	struct amdgpu_ras *ras = psp->ras_context.ras;
2645
2646	/*
2647	 * Skip SMU FW reloading when BACO is used for runpm only,
2648	 * as the SMU is always alive.
2649	 */
2650	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2651		return 0;
2652
2653	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2654		return 0;
2655
2656	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2657	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2658	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2659		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2660		if (ret)
2661			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2662	}
2663
2664	ret = psp_execute_ip_fw_load(psp, ucode);
2665
2666	if (ret)
2667		dev_err(adev->dev, "PSP load smu failed!\n");
2668
2669	return ret;
2670}
2671
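/*
 * Return true when a ucode image should not go through the PSP
 * front-door load path: the image is absent, it is handled by a
 * dedicated loader (SMU, P2S table), it is skipped under SRIOV, or
 * it is covered by RLC autoload.
 */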
2672static bool fw_load_skip_check(struct psp_context *psp,
2673			       struct amdgpu_firmware_info *ucode)
2674{
2675	if (!ucode->fw || !ucode->ucode_size)
2676		return true;
2677
2678	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2679		return true;
2680
2681	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2682	    (psp_smu_reload_quirk(psp) ||
2683	     psp->autoload_supported ||
2684	     psp->pmfw_centralized_cstate_management))
2685		return true;
2686
2687	if (amdgpu_sriov_vf(psp->adev) &&
2688	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2689		return true;
2690
2691	if (psp->autoload_supported &&
2692	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2693	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2694		/* skip mec JT when autoload is enabled */
2695		return true;
2696
2697	return false;
2698}
2699
2700int psp_load_fw_list(struct psp_context *psp,
2701		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
2702{
2703	int ret = 0, i;
2704	struct amdgpu_firmware_info *ucode;
2705
2706	for (i = 0; i < ucode_count; ++i) {
2707		ucode = ucode_list[i];
2708		psp_print_fw_hdr(psp, ucode);
2709		ret = psp_execute_ip_fw_load(psp, ucode);
2710		if (ret)
2711			return ret;
2712	}
2713	return ret;
2714}
2715
2716static int psp_load_non_psp_fw(struct psp_context *psp)
2717{
2718	int i, ret;
2719	struct amdgpu_firmware_info *ucode;
2720	struct amdgpu_device *adev = psp->adev;
2721
2722	if (psp->autoload_supported &&
2723	    !psp->pmfw_centralized_cstate_management) {
2724		ret = psp_load_smu_fw(psp);
2725		if (ret)
2726			return ret;
2727	}
2728
2729	/* Load P2S table first if it's available */
2730	psp_load_p2s_table(psp);
2731
2732	for (i = 0; i < adev->firmware.max_ucodes; i++) {
2733		ucode = &adev->firmware.ucode[i];
2734
2735		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2736		    !fw_load_skip_check(psp, ucode)) {
2737			ret = psp_load_smu_fw(psp);
2738			if (ret)
2739				return ret;
2740			continue;
2741		}
2742
2743		if (fw_load_skip_check(psp, ucode))
2744			continue;
2745
2746		if (psp->autoload_supported &&
2747		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2748			     IP_VERSION(11, 0, 7) ||
2749		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2750			     IP_VERSION(11, 0, 11) ||
2751		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2752			     IP_VERSION(11, 0, 12)) &&
2753		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2754		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2755		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2756			/* PSP only receives one SDMA fw for sienna_cichlid,
2757			 * as all four sdma fw are the same
2758			 */
2759			continue;
2760
2761		psp_print_fw_hdr(psp, ucode);
2762
2763		ret = psp_execute_ip_fw_load(psp, ucode);
2764		if (ret)
2765			return ret;
2766
2767		/* Start rlc autoload after psp has received all the gfx firmware */
2768		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2769		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2770			ret = psp_rlc_autoload_start(psp);
2771			if (ret) {
2772				dev_err(adev->dev, "Failed to start rlc autoload\n");
2773				return ret;
2774			}
2775		}
2776	}
2777
2778	return 0;
2779}
2780
2781static int psp_load_fw(struct amdgpu_device *adev)
2782{
2783	int ret;
2784	struct psp_context *psp = &adev->psp;
2785
2786	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2787		/* should not destroy ring, only stop */
2788		psp_ring_stop(psp, PSP_RING_TYPE__KM);
2789	} else {
2790		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2791
2792		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2793		if (ret) {
2794			dev_err(adev->dev, "PSP ring init failed!\n");
2795			goto failed;
2796		}
2797	}
2798
2799	ret = psp_hw_start(psp);
2800	if (ret)
2801		goto failed;
2802
2803	ret = psp_load_non_psp_fw(psp);
2804	if (ret)
2805		goto failed1;
2806
2807	ret = psp_asd_initialize(psp);
2808	if (ret) {
2809		dev_err(adev->dev, "PSP load asd failed!\n");
2810		goto failed1;
2811	}
2812
2813	ret = psp_rl_load(adev);
2814	if (ret) {
2815		dev_err(adev->dev, "PSP load RL failed!\n");
2816		goto failed1;
2817	}
2818
2819	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2820		if (adev->gmc.xgmi.num_physical_nodes > 1) {
2821			ret = psp_xgmi_initialize(psp, false, true);
2822			/* Warn on XGMI session initialization failure
2823			 * instead of stopping driver initialization
2824			 */
2825			if (ret)
2826				dev_err(psp->adev->dev,
2827					"XGMI: Failed to initialize XGMI session\n");
2828		}
2829	}
2830
2831	if (psp->ta_fw) {
2832		ret = psp_ras_initialize(psp);
2833		if (ret)
2834			dev_err(psp->adev->dev,
2835				"RAS: Failed to initialize RAS\n");
2836
2837		ret = psp_hdcp_initialize(psp);
2838		if (ret)
2839			dev_err(psp->adev->dev,
2840				"HDCP: Failed to initialize HDCP\n");
2841
2842		ret = psp_dtm_initialize(psp);
2843		if (ret)
2844			dev_err(psp->adev->dev,
2845				"DTM: Failed to initialize DTM\n");
2846
2847		ret = psp_rap_initialize(psp);
2848		if (ret)
2849			dev_err(psp->adev->dev,
2850				"RAP: Failed to initialize RAP\n");
2851
2852		ret = psp_securedisplay_initialize(psp);
2853		if (ret)
2854			dev_err(psp->adev->dev,
2855				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2856	}
2857
2858	return 0;
2859
2860failed1:
2861	psp_free_shared_bufs(psp);
2862failed:
2863	/*
2864	 * all cleanup jobs (xgmi terminate, ras terminate,
2865	 * ring destroy, cmd/fence/fw buffers destroy,
2866	 * psp->cmd destroy) are delayed to psp_hw_fini
2867	 */
2868	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2869	return ret;
2870}
2871
2872static int psp_hw_init(void *handle)
2873{
2874	int ret;
2875	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2876
2877	mutex_lock(&adev->firmware.mutex);
2878	/*
2879	 * This sequence is just used on hw_init only once, no need on
2880	 * resume.
2881	 */
2882	ret = amdgpu_ucode_init_bo(adev);
2883	if (ret)
2884		goto failed;
2885
2886	ret = psp_load_fw(adev);
2887	if (ret) {
2888		dev_err(adev->dev, "PSP firmware loading failed\n");
2889		goto failed;
2890	}
2891
2892	mutex_unlock(&adev->firmware.mutex);
2893	return 0;
2894
2895failed:
2896	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2897	mutex_unlock(&adev->firmware.mutex);
2898	return -EINVAL;
2899}
2900
2901static int psp_hw_fini(void *handle)
2902{
2903	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2904	struct psp_context *psp = &adev->psp;
2905
2906	if (psp->ta_fw) {
2907		psp_ras_terminate(psp);
2908		psp_securedisplay_terminate(psp);
2909		psp_rap_terminate(psp);
2910		psp_dtm_terminate(psp);
2911		psp_hdcp_terminate(psp);
2912
2913		if (adev->gmc.xgmi.num_physical_nodes > 1)
2914			psp_xgmi_terminate(psp);
2915	}
2916
2917	psp_asd_terminate(psp);
2918	psp_tmr_terminate(psp);
2919
2920	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2921
2922	return 0;
2923}
2924
2925static int psp_suspend(void *handle)
2926{
2927	int ret = 0;
2928	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2929	struct psp_context *psp = &adev->psp;
2930
2931	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2932	    psp->xgmi_context.context.initialized) {
2933		ret = psp_xgmi_terminate(psp);
2934		if (ret) {
2935			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
2936			goto out;
2937		}
2938	}
2939
2940	if (psp->ta_fw) {
2941		ret = psp_ras_terminate(psp);
2942		if (ret) {
2943			dev_err(adev->dev, "Failed to terminate ras ta\n");
2944			goto out;
2945		}
2946		ret = psp_hdcp_terminate(psp);
2947		if (ret) {
2948			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
2949			goto out;
2950		}
2951		ret = psp_dtm_terminate(psp);
2952		if (ret) {
2953			dev_err(adev->dev, "Failed to terminate dtm ta\n");
2954			goto out;
2955		}
2956		ret = psp_rap_terminate(psp);
2957		if (ret) {
2958			dev_err(adev->dev, "Failed to terminate rap ta\n");
2959			goto out;
2960		}
2961		ret = psp_securedisplay_terminate(psp);
2962		if (ret) {
2963			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
2964			goto out;
2965		}
2966	}
2967
2968	ret = psp_asd_terminate(psp);
2969	if (ret) {
2970		dev_err(adev->dev, "Failed to terminate asd\n");
2971		goto out;
2972	}
2973
2974	ret = psp_tmr_terminate(psp);
2975	if (ret) {
2976		dev_err(adev->dev, "Failed to terminate tmr\n");
2977		goto out;
2978	}
2979
2980	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2981	if (ret)
2982		dev_err(adev->dev, "PSP ring stop failed\n");
2983
2984out:
2985	return ret;
2986}
2987
2988static int psp_resume(void *handle)
2989{
2990	int ret;
2991	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992	struct psp_context *psp = &adev->psp;
2993
2994	dev_info(adev->dev, "PSP is resuming...\n");
2995
2996	if (psp->mem_train_ctx.enable_mem_training) {
2997		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2998		if (ret) {
2999			dev_err(adev->dev, "Failed to process memory training!\n");
3000			return ret;
3001		}
3002	}
3003
3004	mutex_lock(&adev->firmware.mutex);
3005
3006	ret = psp_hw_start(psp);
3007	if (ret)
3008		goto failed;
3009
3010	ret = psp_load_non_psp_fw(psp);
3011	if (ret)
3012		goto failed;
3013
3014	ret = psp_asd_initialize(psp);
3015	if (ret) {
3016		dev_err(adev->dev, "PSP load asd failed!\n");
3017		goto failed;
3018	}
3019
3020	ret = psp_rl_load(adev);
3021	if (ret) {
3022		dev_err(adev->dev, "PSP load RL failed!\n");
3023		goto failed;
3024	}
3025
3026	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3027		ret = psp_xgmi_initialize(psp, false, true);
3028		/* Warn on XGMI session initialization failure
3029		 * instead of stopping driver initialization
3030		 */
3031		if (ret)
3032			dev_err(psp->adev->dev,
3033				"XGMI: Failed to initialize XGMI session\n");
3034	}
3035
3036	if (psp->ta_fw) {
3037		ret = psp_ras_initialize(psp);
3038		if (ret)
3039			dev_err(psp->adev->dev,
3040				"RAS: Failed to initialize RAS\n");
3041
3042		ret = psp_hdcp_initialize(psp);
3043		if (ret)
3044			dev_err(psp->adev->dev,
3045				"HDCP: Failed to initialize HDCP\n");
3046
3047		ret = psp_dtm_initialize(psp);
3048		if (ret)
3049			dev_err(psp->adev->dev,
3050				"DTM: Failed to initialize DTM\n");
3051
3052		ret = psp_rap_initialize(psp);
3053		if (ret)
3054			dev_err(psp->adev->dev,
3055				"RAP: Failed to initialize RAP\n");
3056
3057		ret = psp_securedisplay_initialize(psp);
3058		if (ret)
3059			dev_err(psp->adev->dev,
3060				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3061	}
3062
3063	mutex_unlock(&adev->firmware.mutex);
3064
3065	return 0;
3066
3067failed:
3068	dev_err(adev->dev, "PSP resume failed\n");
3069	mutex_unlock(&adev->firmware.mutex);
3070	return ret;
3071}
3072
3073int psp_gpu_reset(struct amdgpu_device *adev)
3074{
3075	int ret;
3076
3077	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3078		return 0;
3079
3080	mutex_lock(&adev->psp.mutex);
3081	ret = psp_mode1_reset(&adev->psp);
3082	mutex_unlock(&adev->psp.mutex);
3083
3084	return ret;
3085}
3086
3087int psp_rlc_autoload_start(struct psp_context *psp)
3088{
3089	int ret;
3090	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3091
3092	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3093
3094	ret = psp_cmd_submit_buf(psp, NULL, cmd,
3095				 psp->fence_buf_mc_addr);
3096
3097	release_psp_cmd_buf(psp);
3098
3099	return ret;
3100}
3101
3102int psp_ring_cmd_submit(struct psp_context *psp,
3103			uint64_t cmd_buf_mc_addr,
3104			uint64_t fence_mc_addr,
3105			int index)
3106{
3107	unsigned int psp_write_ptr_reg = 0;
3108	struct psp_gfx_rb_frame *write_frame;
3109	struct psp_ring *ring = &psp->km_ring;
3110	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3111	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3112		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3113	struct amdgpu_device *adev = psp->adev;
3114	uint32_t ring_size_dw = ring->ring_size / 4;
3115	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3116
3117	/* KM (GPCOM) prepare write pointer */
3118	psp_write_ptr_reg = psp_ring_get_wptr(psp);
3119
3120	/* Update KM RB frame pointer to new frame */
3121	/* write_frame ptr increments by size of rb_frame in bytes */
3122	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3123	if ((psp_write_ptr_reg % ring_size_dw) == 0)
3124		write_frame = ring_buffer_start;
3125	else
3126		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3127	/* Check invalid write_frame ptr address */
3128	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3129		dev_err(adev->dev,
3130			"ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3131			ring_buffer_start, ring_buffer_end, write_frame);
3132		dev_err(adev->dev,
3133			"write_frame is pointing to address out of bounds\n");
3134		return -EINVAL;
3135	}
3136
3137	/* Initialize KM RB frame */
3138	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3139
3140	/* Update KM RB frame */
3141	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3142	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3143	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3144	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3145	write_frame->fence_value = index;
3146	amdgpu_device_flush_hdp(adev, NULL);
3147
3148	/* Update the write Pointer in DWORDs */
3149	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3150	psp_ring_set_wptr(psp, psp_write_ptr_reg);
3151	return 0;
3152}
3153
3154int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3155{
3156	struct amdgpu_device *adev = psp->adev;
3157	char fw_name[PSP_FW_NAME_LEN];
3158	const struct psp_firmware_header_v1_0 *asd_hdr;
3159	int err = 0;
3160
3161	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3162	err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3163	if (err)
3164		goto out;
3165
3166	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3167	adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3168	adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3169	adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3170	adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3171				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3172	return 0;
3173out:
3174	amdgpu_ucode_release(&adev->psp.asd_fw);
3175	return err;
3176}
3177
3178int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3179{
3180	struct amdgpu_device *adev = psp->adev;
3181	char fw_name[PSP_FW_NAME_LEN];
3182	const struct psp_firmware_header_v1_0 *toc_hdr;
3183	int err = 0;
3184
3185	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3186	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3187	if (err)
3188		goto out;
3189
3190	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3191	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3192	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3193	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3194	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3195				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3196	return 0;
3197out:
3198	amdgpu_ucode_release(&adev->psp.toc_fw);
3199	return err;
3200}
3201
3202static int parse_sos_bin_descriptor(struct psp_context *psp,
3203				   const struct psp_fw_bin_desc *desc,
3204				   const struct psp_firmware_header_v2_0 *sos_hdr)
3205{
3206	uint8_t *ucode_start_addr  = NULL;
3207
3208	if (!psp || !desc || !sos_hdr)
3209		return -EINVAL;
3210
3211	ucode_start_addr  = (uint8_t *)sos_hdr +
3212			    le32_to_cpu(desc->offset_bytes) +
3213			    le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3214
3215	switch (desc->fw_type) {
3216	case PSP_FW_TYPE_PSP_SOS:
3217		psp->sos.fw_version        = le32_to_cpu(desc->fw_version);
3218		psp->sos.feature_version   = le32_to_cpu(desc->fw_version);
3219		psp->sos.size_bytes        = le32_to_cpu(desc->size_bytes);
3220		psp->sos.start_addr	   = ucode_start_addr;
3221		break;
3222	case PSP_FW_TYPE_PSP_SYS_DRV:
3223		psp->sys.fw_version        = le32_to_cpu(desc->fw_version);
3224		psp->sys.feature_version   = le32_to_cpu(desc->fw_version);
3225		psp->sys.size_bytes        = le32_to_cpu(desc->size_bytes);
3226		psp->sys.start_addr        = ucode_start_addr;
3227		break;
3228	case PSP_FW_TYPE_PSP_KDB:
3229		psp->kdb.fw_version        = le32_to_cpu(desc->fw_version);
3230		psp->kdb.feature_version   = le32_to_cpu(desc->fw_version);
3231		psp->kdb.size_bytes        = le32_to_cpu(desc->size_bytes);
3232		psp->kdb.start_addr        = ucode_start_addr;
3233		break;
3234	case PSP_FW_TYPE_PSP_TOC:
3235		psp->toc.fw_version        = le32_to_cpu(desc->fw_version);
3236		psp->toc.feature_version   = le32_to_cpu(desc->fw_version);
3237		psp->toc.size_bytes        = le32_to_cpu(desc->size_bytes);
3238		psp->toc.start_addr        = ucode_start_addr;
3239		break;
3240	case PSP_FW_TYPE_PSP_SPL:
3241		psp->spl.fw_version        = le32_to_cpu(desc->fw_version);
3242		psp->spl.feature_version   = le32_to_cpu(desc->fw_version);
3243		psp->spl.size_bytes        = le32_to_cpu(desc->size_bytes);
3244		psp->spl.start_addr        = ucode_start_addr;
3245		break;
3246	case PSP_FW_TYPE_PSP_RL:
3247		psp->rl.fw_version         = le32_to_cpu(desc->fw_version);
3248		psp->rl.feature_version    = le32_to_cpu(desc->fw_version);
3249		psp->rl.size_bytes         = le32_to_cpu(desc->size_bytes);
3250		psp->rl.start_addr         = ucode_start_addr;
3251		break;
3252	case PSP_FW_TYPE_PSP_SOC_DRV:
3253		psp->soc_drv.fw_version         = le32_to_cpu(desc->fw_version);
3254		psp->soc_drv.feature_version    = le32_to_cpu(desc->fw_version);
3255		psp->soc_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3256		psp->soc_drv.start_addr         = ucode_start_addr;
3257		break;
3258	case PSP_FW_TYPE_PSP_INTF_DRV:
3259		psp->intf_drv.fw_version        = le32_to_cpu(desc->fw_version);
3260		psp->intf_drv.feature_version   = le32_to_cpu(desc->fw_version);
3261		psp->intf_drv.size_bytes        = le32_to_cpu(desc->size_bytes);
3262		psp->intf_drv.start_addr        = ucode_start_addr;
3263		break;
3264	case PSP_FW_TYPE_PSP_DBG_DRV:
3265		psp->dbg_drv.fw_version         = le32_to_cpu(desc->fw_version);
3266		psp->dbg_drv.feature_version    = le32_to_cpu(desc->fw_version);
3267		psp->dbg_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3268		psp->dbg_drv.start_addr         = ucode_start_addr;
3269		break;
3270	case PSP_FW_TYPE_PSP_RAS_DRV:
3271		psp->ras_drv.fw_version         = le32_to_cpu(desc->fw_version);
3272		psp->ras_drv.feature_version    = le32_to_cpu(desc->fw_version);
3273		psp->ras_drv.size_bytes         = le32_to_cpu(desc->size_bytes);
3274		psp->ras_drv.start_addr         = ucode_start_addr;
3275		break;
3276	default:
3277		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3278		break;
3279	}
3280
3281	return 0;
3282}
3283
3284static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3285{
3286	const struct psp_firmware_header_v1_0 *sos_hdr;
3287	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3288	uint8_t *ucode_array_start_addr;
3289
3290	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3291	ucode_array_start_addr = (uint8_t *)sos_hdr +
3292		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3293
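	/*
	 * Only MP0 v13.0.2 parts that are not connected to the CPU use
	 * the alternate (aux) SOS/sys images from the v1_3 header; all
	 * other parts use the standard layout.
	 */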
3294	if (adev->gmc.xgmi.connected_to_cpu ||
3295	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3296		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3297		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3298
3299		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3300		adev->psp.sys.start_addr = ucode_array_start_addr;
3301
3302		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3303		adev->psp.sos.start_addr = ucode_array_start_addr +
3304				le32_to_cpu(sos_hdr->sos.offset_bytes);
3305	} else {
3306		/* Load alternate PSP SOS FW */
3307		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3308
3309		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3310		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3311
3312		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3313		adev->psp.sys.start_addr = ucode_array_start_addr +
3314			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3315
3316		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3317		adev->psp.sos.start_addr = ucode_array_start_addr +
3318			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3319	}
3320
3321	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3322		dev_warn(adev->dev, "PSP SOS FW not available");
3323		return -EINVAL;
3324	}
3325
3326	return 0;
3327}
3328
3329int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3330{
3331	struct amdgpu_device *adev = psp->adev;
3332	char fw_name[PSP_FW_NAME_LEN];
3333	const struct psp_firmware_header_v1_0 *sos_hdr;
3334	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3335	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3336	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3337	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3338	int err = 0;
3339	uint8_t *ucode_array_start_addr;
3340	int fw_index = 0;
3341
3342	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3343	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3344	if (err)
3345		goto out;
3346
3347	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3348	ucode_array_start_addr = (uint8_t *)sos_hdr +
3349		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3350	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3351
3352	switch (sos_hdr->header.header_version_major) {
3353	case 1:
3354		err = psp_init_sos_base_fw(adev);
3355		if (err)
3356			goto out;
3357
3358		if (sos_hdr->header.header_version_minor == 1) {
3359			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3360			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3361			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3362					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3363			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3364			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3365					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3366		}
3367		if (sos_hdr->header.header_version_minor == 2) {
3368			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3369			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3370			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3371						    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3372		}
3373		if (sos_hdr->header.header_version_minor == 3) {
3374			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3375			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3376			adev->psp.toc.start_addr = ucode_array_start_addr +
3377				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3378			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3379			adev->psp.kdb.start_addr = ucode_array_start_addr +
3380				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3381			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3382			adev->psp.spl.start_addr = ucode_array_start_addr +
3383				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3384			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3385			adev->psp.rl.start_addr = ucode_array_start_addr +
3386				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3387		}
3388		break;
3389	case 2:
3390		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3391
3392		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3393			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3394			err = -EINVAL;
3395			goto out;
3396		}
3397
3398		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3399			err = parse_sos_bin_descriptor(psp,
3400						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
3401						       sos_hdr_v2_0);
3402			if (err)
3403				goto out;
3404		}
3405		break;
3406	default:
3407		dev_err(adev->dev,
3408			"unsupported psp sos firmware\n");
3409		err = -EINVAL;
3410		goto out;
3411	}
3412
3413	return 0;
3414out:
3415	amdgpu_ucode_release(&adev->psp.sos_fw);
3416
3417	return err;
3418}
3419
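/*
 * Fill in the bin descriptor (version, size and start address) for a
 * single TA contained in a v2.0 TA firmware package.
 */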
3420static int parse_ta_bin_descriptor(struct psp_context *psp,
3421				   const struct psp_fw_bin_desc *desc,
3422				   const struct ta_firmware_header_v2_0 *ta_hdr)
3423{
3424	uint8_t *ucode_start_addr  = NULL;
3425
3426	if (!psp || !desc || !ta_hdr)
3427		return -EINVAL;
3428
3429	ucode_start_addr  = (uint8_t *)ta_hdr +
3430			    le32_to_cpu(desc->offset_bytes) +
3431			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3432
3433	switch (desc->fw_type) {
3434	case TA_FW_TYPE_PSP_ASD:
3435		psp->asd_context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3436		psp->asd_context.bin_desc.feature_version   = le32_to_cpu(desc->fw_version);
3437		psp->asd_context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3438		psp->asd_context.bin_desc.start_addr        = ucode_start_addr;
3439		break;
3440	case TA_FW_TYPE_PSP_XGMI:
3441		psp->xgmi_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3442		psp->xgmi_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3443		psp->xgmi_context.context.bin_desc.start_addr       = ucode_start_addr;
3444		break;
3445	case TA_FW_TYPE_PSP_RAS:
3446		psp->ras_context.context.bin_desc.fw_version        = le32_to_cpu(desc->fw_version);
3447		psp->ras_context.context.bin_desc.size_bytes        = le32_to_cpu(desc->size_bytes);
3448		psp->ras_context.context.bin_desc.start_addr        = ucode_start_addr;
3449		break;
3450	case TA_FW_TYPE_PSP_HDCP:
3451		psp->hdcp_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3452		psp->hdcp_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3453		psp->hdcp_context.context.bin_desc.start_addr       = ucode_start_addr;
3454		break;
3455	case TA_FW_TYPE_PSP_DTM:
3456		psp->dtm_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3457		psp->dtm_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3458		psp->dtm_context.context.bin_desc.start_addr       = ucode_start_addr;
3459		break;
3460	case TA_FW_TYPE_PSP_RAP:
3461		psp->rap_context.context.bin_desc.fw_version       = le32_to_cpu(desc->fw_version);
3462		psp->rap_context.context.bin_desc.size_bytes       = le32_to_cpu(desc->size_bytes);
3463		psp->rap_context.context.bin_desc.start_addr       = ucode_start_addr;
3464		break;
3465	case TA_FW_TYPE_PSP_SECUREDISPLAY:
3466		psp->securedisplay_context.context.bin_desc.fw_version =
3467			le32_to_cpu(desc->fw_version);
3468		psp->securedisplay_context.context.bin_desc.size_bytes =
3469			le32_to_cpu(desc->size_bytes);
3470		psp->securedisplay_context.context.bin_desc.start_addr =
3471			ucode_start_addr;
3472		break;
3473	default:
3474		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3475		break;
3476	}
3477
3478	return 0;
3479}
3480
3481static int parse_ta_v1_microcode(struct psp_context *psp)
3482{
3483	const struct ta_firmware_header_v1_0 *ta_hdr;
3484	struct amdgpu_device *adev = psp->adev;
3485
3486	ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3487
3488	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3489		return -EINVAL;
3490
3491	adev->psp.xgmi_context.context.bin_desc.fw_version =
3492		le32_to_cpu(ta_hdr->xgmi.fw_version);
3493	adev->psp.xgmi_context.context.bin_desc.size_bytes =
3494		le32_to_cpu(ta_hdr->xgmi.size_bytes);
3495	adev->psp.xgmi_context.context.bin_desc.start_addr =
3496		(uint8_t *)ta_hdr +
3497		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3498
3499	adev->psp.ras_context.context.bin_desc.fw_version =
3500		le32_to_cpu(ta_hdr->ras.fw_version);
3501	adev->psp.ras_context.context.bin_desc.size_bytes =
3502		le32_to_cpu(ta_hdr->ras.size_bytes);
3503	adev->psp.ras_context.context.bin_desc.start_addr =
3504		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3505		le32_to_cpu(ta_hdr->ras.offset_bytes);
3506
3507	adev->psp.hdcp_context.context.bin_desc.fw_version =
3508		le32_to_cpu(ta_hdr->hdcp.fw_version);
3509	adev->psp.hdcp_context.context.bin_desc.size_bytes =
3510		le32_to_cpu(ta_hdr->hdcp.size_bytes);
3511	adev->psp.hdcp_context.context.bin_desc.start_addr =
3512		(uint8_t *)ta_hdr +
3513		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3514
3515	adev->psp.dtm_context.context.bin_desc.fw_version =
3516		le32_to_cpu(ta_hdr->dtm.fw_version);
3517	adev->psp.dtm_context.context.bin_desc.size_bytes =
3518		le32_to_cpu(ta_hdr->dtm.size_bytes);
3519	adev->psp.dtm_context.context.bin_desc.start_addr =
3520		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3521		le32_to_cpu(ta_hdr->dtm.offset_bytes);
3522
3523	adev->psp.securedisplay_context.context.bin_desc.fw_version =
3524		le32_to_cpu(ta_hdr->securedisplay.fw_version);
3525	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3526		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3527	adev->psp.securedisplay_context.context.bin_desc.start_addr =
3528		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3529		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3530
3531	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3532
3533	return 0;
3534}
3535
3536static int parse_ta_v2_microcode(struct psp_context *psp)
3537{
3538	const struct ta_firmware_header_v2_0 *ta_hdr;
3539	struct amdgpu_device *adev = psp->adev;
3540	int err = 0;
3541	int ta_index = 0;
3542
3543	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3544
3545	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3546		return -EINVAL;
3547
3548	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3549		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3550		return -EINVAL;
3551	}
3552
3553	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3554		err = parse_ta_bin_descriptor(psp,
3555					      &ta_hdr->ta_fw_bin[ta_index],
3556					      ta_hdr);
3557		if (err)
3558			return err;
3559	}
3560
3561	return 0;
3562}
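/*
 * Editorial sketch (assumption, not normative) of the packed TA image the
 * two parsers above consume.  A v2 header carries ta_fw_bin_count
 * descriptors; parse_ta_bin_descriptor() resolves each one to a payload
 * inside the blob and files it under the matching *_context.bin_desc:
 *
 *   +--------------------------------+ <- ta_hdr (amdgpu/<chip>_ta.bin)
 *   | struct ta_firmware_header_v2_0 |
 *   |   ta_fw_bin_count              |
 *   |   ta_fw_bin[0..count-1]        |   (fw_type, fw_version, offset, size)
 *   +--------------------------------+ <- ucode_array_offset_bytes
 *   | TA payload #0                  |
 *   | TA payload #1                  |
 *   | ...                            |
 *   +--------------------------------+
 *
 * A v1 header instead has fixed xgmi/ras/hdcp/dtm/securedisplay fields:
 * xgmi and hdcp start at the ucode array offset and the remaining TAs are
 * located relative to them via their offset_bytes, as
 * parse_ta_v1_microcode() shows.
 */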
3563
3564int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3565{
3566	const struct common_firmware_header *hdr;
3567	struct amdgpu_device *adev = psp->adev;
3568	char fw_name[PSP_FW_NAME_LEN];
3569	int err;
3570
3571	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3572	err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3573	if (err)
3574		return err;
3575
3576	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3577	switch (le16_to_cpu(hdr->header_version_major)) {
3578	case 1:
3579		err = parse_ta_v1_microcode(psp);
3580		break;
3581	case 2:
3582		err = parse_ta_v2_microcode(psp);
3583		break;
3584	default:
3585		dev_err(adev->dev, "unsupported TA header version\n");
3586		err = -EINVAL;
3587	}
3588
3589	if (err)
3590		amdgpu_ucode_release(&adev->psp.ta_fw);
3591
3592	return err;
3593}
3594
3595int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3596{
3597	struct amdgpu_device *adev = psp->adev;
3598	char fw_name[PSP_FW_NAME_LEN];
3599	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3600	struct amdgpu_firmware_info *info = NULL;
3601	int err = 0;
3602
3603	if (!amdgpu_sriov_vf(adev)) {
3604		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3605		return -EINVAL;
3606	}
3607
3608	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3609	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3610	if (err) {
3611		if (err == -ENODEV) {
3612			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3613			err = 0;
3614			goto out;
3615		}
3616		dev_err(adev->dev, "fail to initialize cap microcode\n");
3617	}
3618
3619	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3620	info->ucode_id = AMDGPU_UCODE_ID_CAP;
3621	info->fw = adev->psp.cap_fw;
3622	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3623		adev->psp.cap_fw->data;
3624	adev->firmware.fw_size += ALIGN(
3625			le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3626	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3627	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3628	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3629
3630	return 0;
3631
3632out:
3633	amdgpu_ucode_release(&adev->psp.cap_fw);
3634	return err;
3635}
3636
3637static int psp_set_clockgating_state(void *handle,
3638				     enum amd_clockgating_state state)
3639{
3640	return 0;
3641}
3642
3643static int psp_set_powergating_state(void *handle,
3644				     enum amd_powergating_state state)
3645{
3646	return 0;
3647}
3648
3649static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3650					 struct device_attribute *attr,
3651					 char *buf)
3652{
3653	struct drm_device *ddev = dev_get_drvdata(dev);
3654	struct amdgpu_device *adev = drm_to_adev(ddev);
3655	uint32_t fw_ver;
3656	int ret;
3657
3658	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3659		dev_info(adev->dev, "PSP block is not ready yet.\n");
3660		return -EBUSY;
3661	}
3662
3663	mutex_lock(&adev->psp.mutex);
3664	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3665	mutex_unlock(&adev->psp.mutex);
3666
3667	if (ret) {
3668		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3669		return ret;
3670	}
3671
3672	return sysfs_emit(buf, "%x\n", fw_ver);
3673}
3674
3675static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3676						       struct device_attribute *attr,
3677						       const char *buf,
3678						       size_t count)
3679{
3680	struct drm_device *ddev = dev_get_drvdata(dev);
3681	struct amdgpu_device *adev = drm_to_adev(ddev);
3682	int ret, idx;
3683	char fw_name[100];
3684	const struct firmware *usbc_pd_fw;
3685	struct amdgpu_bo *fw_buf_bo = NULL;
3686	uint64_t fw_pri_mc_addr;
3687	void *fw_pri_cpu_addr;
3688
3689	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3690		dev_err(adev->dev, "PSP block is not ready yet.");
3691		return -EBUSY;
3692	}
3693
3694	if (!drm_dev_enter(ddev, &idx))
3695		return -ENODEV;
3696
3697	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3698	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3699	if (ret)
3700		goto fail;
3701
3702	/* LFB address which is aligned to 1MB boundary per PSP request */
3703	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3704				      AMDGPU_GEM_DOMAIN_VRAM |
3705				      AMDGPU_GEM_DOMAIN_GTT,
3706				      &fw_buf_bo, &fw_pri_mc_addr,
3707				      &fw_pri_cpu_addr);
3708	if (ret)
3709		goto rel_buf;
3710
3711	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3712
3713	mutex_lock(&adev->psp.mutex);
3714	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3715	mutex_unlock(&adev->psp.mutex);
3716
3717	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3718
3719rel_buf:
3720	release_firmware(usbc_pd_fw);
3721fail:
3722	if (ret) {
3723		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
3724		count = ret;
3725	}
3726
3727	drm_dev_exit(idx);
3728	return count;
3729}
3730
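/*
 * Copy a firmware image into the PSP private (fw_pri) buffer.  The
 * drm_dev_enter()/drm_dev_exit() pair below skips the copy entirely if the
 * underlying device has already been unplugged.
 */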
3731void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3732{
3733	int idx;
3734
3735	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3736		return;
3737
3738	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3739	memcpy(psp->fw_pri_buf, start_addr, bin_size);
3740
3741	drm_dev_exit(idx);
3742}
3743
3744/**
3745 * DOC: usbc_pd_fw
3746 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3747 * this file will trigger the update process.
3748 */
3749static DEVICE_ATTR(usbc_pd_fw, 0644,
3750		   psp_usbc_pd_fw_sysfs_read,
3751		   psp_usbc_pd_fw_sysfs_write);
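/*
 * Illustrative userspace sketch (not part of the driver) of driving the
 * usbc_pd_fw attribute documented above: a read returns the current PD
 * firmware version in hex, and a write of a file name (resolved by the
 * kernel as "amdgpu/<name>" in the firmware search path) starts an update.
 * The sysfs path and firmware file name below are assumptions.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/class/drm/card0/device/usbc_pd_fw";
	const char *fw = "usbc_pd_example.bin";	/* hypothetical file name */
	char ver[32] = {0};
	int fd;

	fd = open(attr, O_RDONLY);
	if (fd < 0)
		return 1;
	read(fd, ver, sizeof(ver) - 1);		/* hex version, e.g. "1020304\n" */
	close(fd);
	printf("current USB-C PD FW: %s", ver);

	fd = open(attr, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, fw, strlen(fw)) < 0)	/* triggers the update path */
		perror("trigger PD FW update");
	close(fd);
	return 0;
}
#endif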
3752
3753int is_psp_fw_valid(struct psp_bin_desc bin)
3754{
3755	return bin.size_bytes;
3756}
3757
3758static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3759					struct bin_attribute *bin_attr,
3760					char *buffer, loff_t pos, size_t count)
3761{
3762	struct device *dev = kobj_to_dev(kobj);
3763	struct drm_device *ddev = dev_get_drvdata(dev);
3764	struct amdgpu_device *adev = drm_to_adev(ddev);
3765
3766	adev->psp.vbflash_done = false;
3767
3768	/* Safeguard against memory drain */
3769	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3770		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3771		kvfree(adev->psp.vbflash_tmp_buf);
3772		adev->psp.vbflash_tmp_buf = NULL;
3773		adev->psp.vbflash_image_size = 0;
3774		return -ENOMEM;
3775	}
3776
3777	/* TODO Just allocate max for now and optimize to realloc later if needed */
3778	if (!adev->psp.vbflash_tmp_buf) {
3779		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3780		if (!adev->psp.vbflash_tmp_buf)
3781			return -ENOMEM;
3782	}
3783
3784	mutex_lock(&adev->psp.mutex);
3785	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3786	adev->psp.vbflash_image_size += count;
3787	mutex_unlock(&adev->psp.mutex);
3788
3789	dev_dbg(adev->dev, "IFWI staged for update\n");
3790
3791	return count;
3792}
3793
3794static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3795				       struct bin_attribute *bin_attr, char *buffer,
3796				       loff_t pos, size_t count)
3797{
3798	struct device *dev = kobj_to_dev(kobj);
3799	struct drm_device *ddev = dev_get_drvdata(dev);
3800	struct amdgpu_device *adev = drm_to_adev(ddev);
3801	struct amdgpu_bo *fw_buf_bo = NULL;
3802	uint64_t fw_pri_mc_addr;
3803	void *fw_pri_cpu_addr;
3804	int ret;
3805
3806	if (adev->psp.vbflash_image_size == 0)
3807		return -EINVAL;
3808
3809	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3810
3811	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3812					AMDGPU_GPU_PAGE_SIZE,
3813					AMDGPU_GEM_DOMAIN_VRAM,
3814					&fw_buf_bo,
3815					&fw_pri_mc_addr,
3816					&fw_pri_cpu_addr);
3817	if (ret)
3818		goto rel_buf;
3819
3820	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3821
3822	mutex_lock(&adev->psp.mutex);
3823	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3824	mutex_unlock(&adev->psp.mutex);
3825
3826	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3827
3828rel_buf:
3829	kvfree(adev->psp.vbflash_tmp_buf);
3830	adev->psp.vbflash_tmp_buf = NULL;
3831	adev->psp.vbflash_image_size = 0;
3832
3833	if (ret) {
3834		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3835		return ret;
3836	}
3837
3838	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3839	return 0;
3840}
3841
3842/**
3843 * DOC: psp_vbflash
3844 * Writing to this file will stage an IFWI for update. Reading from this file
3845 * will trigger the update process.
3846 */
3847static struct bin_attribute psp_vbflash_bin_attr = {
3848	.attr = {.name = "psp_vbflash", .mode = 0660},
3849	.size = 0,
3850	.write = amdgpu_psp_vbflash_write,
3851	.read = amdgpu_psp_vbflash_read,
3852};
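/*
 * Illustrative userspace sketch (not part of the driver) of the flow the
 * psp_vbflash attribute above implies: write() calls stage the IFWI image
 * (amdgpu_psp_vbflash_write() accumulates the chunks), and a subsequent
 * read() on the same node triggers the SPIROM update via
 * psp_update_spirom(); completion is reported by psp_vbflash_status below.
 * The sysfs path is an assumption.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

static int flash_ifwi(const char *image_path)
{
	const char *node = "/sys/class/drm/card0/device/psp_vbflash";
	char buf[4096];
	ssize_t n;
	int img, fd, ret = -1;

	img = open(image_path, O_RDONLY);
	fd = open(node, O_WRONLY);
	if (img < 0 || fd < 0)
		goto out;

	/* stage the image; sysfs passes the growing offset as 'pos' */
	while ((n = read(img, buf, sizeof(buf))) > 0)
		if (write(fd, buf, n) != n)
			goto out;
	close(fd);

	/* a read on the same node kicks off the actual flash */
	fd = open(node, O_RDONLY);
	if (fd < 0)
		goto out;
	ret = (read(fd, buf, 1) < 0) ? -1 : 0;
out:
	if (img >= 0)
		close(img);
	if (fd >= 0)
		close(fd);
	return ret;
}
#endif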
3853
3854/**
3855 * DOC: psp_vbflash_status
3856 * The status of the flash process.
3857 * 0: IFWI flash not complete.
3858 * 1: IFWI flash complete.
3859 */
3860static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3861					 struct device_attribute *attr,
3862					 char *buf)
3863{
3864	struct drm_device *ddev = dev_get_drvdata(dev);
3865	struct amdgpu_device *adev = drm_to_adev(ddev);
3866	uint32_t vbflash_status;
3867
3868	vbflash_status = psp_vbflash_status(&adev->psp);
3869	if (!adev->psp.vbflash_done)
3870		vbflash_status = 0;
3871	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3872		vbflash_status = 1;
3873
3874	return sysfs_emit(buf, "0x%x\n", vbflash_status);
3875}
3876static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
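/*
 * Illustrative userspace companion (not part of the driver) for the status
 * attribute above: after triggering the flash, poll psp_vbflash_status
 * until it reads back 0x1.  The sysfs path is an assumption.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int ifwi_flash_done(void)
{
	char status[16] = {0};
	int fd = open("/sys/class/drm/card0/device/psp_vbflash_status",
		      O_RDONLY);

	if (fd < 0)
		return 0;
	read(fd, status, sizeof(status) - 1);	/* "0x0" or "0x1" */
	close(fd);
	return !strncmp(status, "0x1", 3);
}
#endif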
3877
3878static struct bin_attribute *bin_flash_attrs[] = {
3879	&psp_vbflash_bin_attr,
3880	NULL
3881};
3882
3883static struct attribute *flash_attrs[] = {
3884	&dev_attr_psp_vbflash_status.attr,
3885	&dev_attr_usbc_pd_fw.attr,
3886	NULL
3887};
3888
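/*
 * The two *_is_visible() callbacks below gate the attributes created from
 * this group: usbc_pd_fw is exposed (0660) only when the PSP reports USB-C
 * PD firmware update support (sup_pd_fw_up), while psp_vbflash_status and
 * the psp_vbflash binary node are exposed only when IFWI updates are
 * supported (sup_ifwi_up).
 */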
3889static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3890{
3891	struct device *dev = kobj_to_dev(kobj);
3892	struct drm_device *ddev = dev_get_drvdata(dev);
3893	struct amdgpu_device *adev = drm_to_adev(ddev);
3894
3895	if (attr == &dev_attr_usbc_pd_fw.attr)
3896		return adev->psp.sup_pd_fw_up ? 0660 : 0;
3897
3898	return adev->psp.sup_ifwi_up ? 0440 : 0;
3899}
3900
3901static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
3902						struct bin_attribute *attr,
3903						int idx)
3904{
3905	struct device *dev = kobj_to_dev(kobj);
3906	struct drm_device *ddev = dev_get_drvdata(dev);
3907	struct amdgpu_device *adev = drm_to_adev(ddev);
3908
3909	return adev->psp.sup_ifwi_up ? 0660 : 0;
3910}
3911
3912const struct attribute_group amdgpu_flash_attr_group = {
3913	.attrs = flash_attrs,
3914	.bin_attrs = bin_flash_attrs,
3915	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
3916	.is_visible = amdgpu_flash_attr_is_visible,
3917};
3918
3919const struct amd_ip_funcs psp_ip_funcs = {
3920	.name = "psp",
3921	.early_init = psp_early_init,
3922	.late_init = NULL,
3923	.sw_init = psp_sw_init,
3924	.sw_fini = psp_sw_fini,
3925	.hw_init = psp_hw_init,
3926	.hw_fini = psp_hw_fini,
3927	.suspend = psp_suspend,
3928	.resume = psp_resume,
3929	.is_idle = NULL,
3930	.check_soft_reset = NULL,
3931	.wait_for_idle = NULL,
3932	.soft_reset = NULL,
3933	.set_clockgating_state = psp_set_clockgating_state,
3934	.set_powergating_state = psp_set_powergating_state,
3935};
3936
3937const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
3938	.type = AMD_IP_BLOCK_TYPE_PSP,
3939	.major = 3,
3940	.minor = 1,
3941	.rev = 0,
3942	.funcs = &psp_ip_funcs,
3943};
3944
3945const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
3946	.type = AMD_IP_BLOCK_TYPE_PSP,
3947	.major = 10,
3948	.minor = 0,
3949	.rev = 0,
3950	.funcs = &psp_ip_funcs,
3951};
3952
3953const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
3954	.type = AMD_IP_BLOCK_TYPE_PSP,
3955	.major = 11,
3956	.minor = 0,
3957	.rev = 0,
3958	.funcs = &psp_ip_funcs,
3959};
3960
3961const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
3962	.type = AMD_IP_BLOCK_TYPE_PSP,
3963	.major = 11,
3964	.minor = 0,
3965	.rev = 8,
3966	.funcs = &psp_ip_funcs,
3967};
3968
3969const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
3970	.type = AMD_IP_BLOCK_TYPE_PSP,
3971	.major = 12,
3972	.minor = 0,
3973	.rev = 0,
3974	.funcs = &psp_ip_funcs,
3975};
3976
3977const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
3978	.type = AMD_IP_BLOCK_TYPE_PSP,
3979	.major = 13,
3980	.minor = 0,
3981	.rev = 0,
3982	.funcs = &psp_ip_funcs,
3983};
3984
3985const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
3986	.type = AMD_IP_BLOCK_TYPE_PSP,
3987	.major = 13,
3988	.minor = 0,
3989	.rev = 4,
3990	.funcs = &psp_ip_funcs,
3991};
3992
3993const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
3994	.type = AMD_IP_BLOCK_TYPE_PSP,
3995	.major = 14,
3996	.minor = 0,
3997	.rev = 0,
3998	.funcs = &psp_ip_funcs,
3999};
v5.9
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Author: Huang Rui
  23 *
  24 */
  25
  26#include <linux/firmware.h>
  27#include <linux/dma-mapping.h>
  28
  29#include "amdgpu.h"
  30#include "amdgpu_psp.h"
  31#include "amdgpu_ucode.h"
 
  32#include "soc15_common.h"
  33#include "psp_v3_1.h"
  34#include "psp_v10_0.h"
  35#include "psp_v11_0.h"
 
  36#include "psp_v12_0.h"
 
 
 
  37
  38#include "amdgpu_ras.h"
 
 
  39
  40static int psp_sysfs_init(struct amdgpu_device *adev);
  41static void psp_sysfs_fini(struct amdgpu_device *adev);
  42
  43static int psp_load_smu_fw(struct psp_context *psp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  44
  45/*
  46 * Due to DF Cstate management centralized to PMFW, the firmware
  47 * loading sequence will be updated as below:
  48 *   - Load KDB
  49 *   - Load SYS_DRV
  50 *   - Load tOS
  51 *   - Load PMFW
  52 *   - Setup TMR
  53 *   - Load other non-psp fw
  54 *   - Load ASD
  55 *   - Load XGMI/RAS/HDCP/DTM TA if any
  56 *
  57 * This new sequence is required for
  58 *   - Arcturus
  59 *   - Navi12 and onwards
  60 */
  61static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
  62{
  63	struct amdgpu_device *adev = psp->adev;
  64
  65	psp->pmfw_centralized_cstate_management = false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  66
  67	if (amdgpu_sriov_vf(adev))
  68		return;
 
 
 
  69
  70	if (adev->flags & AMD_IS_APU)
  71		return;
  72
  73	if ((adev->asic_type == CHIP_ARCTURUS) ||
  74	    (adev->asic_type >= CHIP_NAVI12))
  75		psp->pmfw_centralized_cstate_management = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  76}
  77
  78static int psp_early_init(void *handle)
  79{
  80	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  81	struct psp_context *psp = &adev->psp;
  82
  83	switch (adev->asic_type) {
  84	case CHIP_VEGA10:
  85	case CHIP_VEGA12:
 
 
  86		psp_v3_1_set_psp_funcs(psp);
  87		psp->autoload_supported = false;
 
  88		break;
  89	case CHIP_RAVEN:
 
  90		psp_v10_0_set_psp_funcs(psp);
  91		psp->autoload_supported = false;
 
  92		break;
  93	case CHIP_VEGA20:
  94	case CHIP_ARCTURUS:
  95		psp_v11_0_set_psp_funcs(psp);
  96		psp->autoload_supported = false;
 
  97		break;
  98	case CHIP_NAVI10:
  99	case CHIP_NAVI14:
 100	case CHIP_NAVI12:
 101	case CHIP_SIENNA_CICHLID:
 102	case CHIP_NAVY_FLOUNDER:
 
 
 
 
 
 103		psp_v11_0_set_psp_funcs(psp);
 104		psp->autoload_supported = true;
 105		break;
 106	case CHIP_RENOIR:
 
 107		psp_v12_0_set_psp_funcs(psp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 108		break;
 109	default:
 110		return -EINVAL;
 111	}
 112
 113	psp->adev = adev;
 114
 
 
 115	psp_check_pmfw_centralized_cstate_management(psp);
 116
 117	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 118}
 119
 120static void psp_memory_training_fini(struct psp_context *psp)
 121{
 122	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 123
 124	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
 125	kfree(ctx->sys_cache);
 126	ctx->sys_cache = NULL;
 127}
 128
 129static int psp_memory_training_init(struct psp_context *psp)
 130{
 131	int ret;
 132	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 133
 134	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
 135		DRM_DEBUG("memory training is not supported!\n");
 136		return 0;
 137	}
 138
 139	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
 140	if (ctx->sys_cache == NULL) {
 141		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
 142		ret = -ENOMEM;
 143		goto Err_out;
 144	}
 145
 146	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
 147		  ctx->train_data_size,
 148		  ctx->p2c_train_data_offset,
 149		  ctx->c2p_train_data_offset);
 
 150	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
 151	return 0;
 152
 153Err_out:
 154	psp_memory_training_fini(psp);
 155	return ret;
 156}
 157
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 158static int psp_sw_init(void *handle)
 159{
 160	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 161	struct psp_context *psp = &adev->psp;
 162	int ret;
 
 
 
 163
 164	ret = psp_init_microcode(psp);
 165	if (ret) {
 166		DRM_ERROR("Failed to load psp firmware!\n");
 167		return ret;
 168	}
 169
 170	ret = psp_memory_training_init(psp);
 171	if (ret) {
 172		DRM_ERROR("Failed to initialize memory training!\n");
 173		return ret;
 174	}
 175	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
 176	if (ret) {
 177		DRM_ERROR("Failed to process memory training!\n");
 178		return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 179	}
 180
 181	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
 182		ret= psp_sysfs_init(adev);
 
 
 
 
 
 
 183		if (ret) {
 
 184			return ret;
 185		}
 186	}
 187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 188	return 0;
 
 
 
 
 
 
 
 
 189}
 190
 191static int psp_sw_fini(void *handle)
 192{
 193	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
 
 
 
 
 
 
 
 
 194
 195	psp_memory_training_fini(&adev->psp);
 196	if (adev->psp.sos_fw) {
 197		release_firmware(adev->psp.sos_fw);
 198		adev->psp.sos_fw = NULL;
 199	}
 200	if (adev->psp.asd_fw) {
 201		release_firmware(adev->psp.asd_fw);
 202		adev->psp.asd_fw = NULL;
 203	}
 204	if (adev->psp.ta_fw) {
 205		release_firmware(adev->psp.ta_fw);
 206		adev->psp.ta_fw = NULL;
 207	}
 208
 209	if (adev->asic_type == CHIP_NAVI10)
 210		psp_sysfs_fini(adev);
 
 
 
 
 
 
 
 
 
 211
 212	return 0;
 213}
 214
 215int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 216		 uint32_t reg_val, uint32_t mask, bool check_changed)
 217{
 218	uint32_t val;
 219	int i;
 220	struct amdgpu_device *adev = psp->adev;
 221
 
 
 
 222	for (i = 0; i < adev->usec_timeout; i++) {
 223		val = RREG32(reg_index);
 224		if (check_changed) {
 225			if (val != reg_val)
 226				return 0;
 227		} else {
 228			if ((val & mask) == reg_val)
 229				return 0;
 230		}
 231		udelay(1);
 232	}
 233
 234	return -ETIME;
 235}
 236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 237static int
 238psp_cmd_submit_buf(struct psp_context *psp,
 239		   struct amdgpu_firmware_info *ucode,
 240		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 241{
 242	int ret;
 243	int index;
 244	int timeout = 2000;
 245	bool ras_intr = false;
 246	bool skip_unsupport = false;
 247
 248	mutex_lock(&psp->mutex);
 
 249
 250	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 251
 252	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 253
 254	index = atomic_inc_return(&psp->fence_value);
 255	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
 256	if (ret) {
 257		atomic_dec(&psp->fence_value);
 258		mutex_unlock(&psp->mutex);
 259		return ret;
 260	}
 261
 262	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 263	while (*((unsigned int *)psp->fence_buf) != index) {
 264		if (--timeout == 0)
 265			break;
 266		/*
 267		 * Shouldn't wait for timeout when err_event_athub occurs,
 268		 * because gpu reset thread triggered and lock resource should
 269		 * be released for psp resume sequence.
 270		 */
 271		ras_intr = amdgpu_ras_intr_triggered();
 272		if (ras_intr)
 273			break;
 274		msleep(1);
 275		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 276	}
 277
 278	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
 279	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
 280		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
 281
 
 
 282	/* In some cases, psp response status is not 0 even there is no
 283	 * problem while the command is submitted. Some version of PSP FW
 284	 * doesn't write 0 to that field.
 285	 * So here we would like to only print a warning instead of an error
 286	 * during psp initialization to avoid breaking hw_init and it doesn't
 287	 * return -EINVAL.
 288	 */
 289	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
 290		if (ucode)
 291			DRM_WARN("failed to load ucode id (%d) ",
 292				  ucode->ucode_id);
 293		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
 294			 psp->cmd_buf_mem->cmd_id,
 
 
 295			 psp->cmd_buf_mem->resp.status);
 296		if (!timeout) {
 297			mutex_unlock(&psp->mutex);
 298			return -EINVAL;
 
 
 
 
 299		}
 300	}
 301
 302	/* get xGMI session id from response buffer */
 303	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
 304
 305	if (ucode) {
 306		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
 307		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
 308	}
 309	mutex_unlock(&psp->mutex);
 310
 
 311	return ret;
 312}
 313
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 314static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
 315				 struct psp_gfx_cmd_resp *cmd,
 316				 uint64_t tmr_mc, uint32_t size)
 317{
 
 
 
 
 
 
 
 
 
 318	if (amdgpu_sriov_vf(psp->adev))
 319		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
 320	else
 321		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
 322	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
 323	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
 324	cmd->cmd.cmd_setup_tmr.buf_size = size;
 
 
 
 325}
 326
 327static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 328				      uint64_t pri_buf_mc, uint32_t size)
 329{
 330	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
 331	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
 332	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
 333	cmd->cmd.cmd_load_toc.toc_size = size;
 334}
 335
 336/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
 337static int psp_load_toc(struct psp_context *psp,
 338			uint32_t *tmr_size)
 339{
 340	int ret;
 341	struct psp_gfx_cmd_resp *cmd;
 342
 343	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 344	if (!cmd)
 345		return -ENOMEM;
 346	/* Copy toc to psp firmware private buffer */
 347	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 348	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
 349
 350	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
 351
 352	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 353				 psp->fence_buf_mc_addr);
 354	if (!ret)
 355		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
 356	kfree(cmd);
 
 
 357	return ret;
 358}
 359
 360/* Set up Trusted Memory Region */
 361static int psp_tmr_init(struct psp_context *psp)
 362{
 363	int ret;
 364	int tmr_size;
 365	void *tmr_buf;
 366	void **pptr;
 367
 368	/*
 369	 * According to HW engineer, they prefer the TMR address be "naturally
 370	 * aligned" , e.g. the start address be an integer divide of TMR size.
 371	 *
 372	 * Note: this memory need be reserved till the driver
 373	 * uninitializes.
 374	 */
 375	tmr_size = PSP_TMR_SIZE;
 376
 377	/* For ASICs support RLC autoload, psp will parse the toc
 378	 * and calculate the total size of TMR needed */
 
 379	if (!amdgpu_sriov_vf(psp->adev) &&
 380	    psp->toc_start_addr &&
 381	    psp->toc_bin_size &&
 382	    psp->fw_pri_buf) {
 383		ret = psp_load_toc(psp, &tmr_size);
 384		if (ret) {
 385			DRM_ERROR("Failed to load toc\n");
 386			return ret;
 387		}
 388	}
 389
 390	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
 391	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
 392				      AMDGPU_GEM_DOMAIN_VRAM,
 393				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 394
 395	return ret;
 396}
 397
 398static int psp_clear_vf_fw(struct psp_context *psp)
 399{
 400	int ret;
 401	struct psp_gfx_cmd_resp *cmd;
 402
 403	if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12)
 404		return 0;
 405
 406	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 407	if (!cmd)
 408		return -ENOMEM;
 409
 410	cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW;
 411
 412	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 413	kfree(cmd);
 414
 415	return ret;
 416}
 417
 418static bool psp_skip_tmr(struct psp_context *psp)
 419{
 420	switch (psp->adev->asic_type) {
 421	case CHIP_NAVI12:
 422	case CHIP_SIENNA_CICHLID:
 
 
 
 423		return true;
 424	default:
 425		return false;
 426	}
 427}
 428
 429static int psp_tmr_load(struct psp_context *psp)
 430{
 431	int ret;
 432	struct psp_gfx_cmd_resp *cmd;
 433
 434	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
 435	 * Already set up by host driver.
 436	 */
 437	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
 438		return 0;
 439
 440	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 441	if (!cmd)
 442		return -ENOMEM;
 443
 444	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
 445			     amdgpu_bo_size(psp->tmr_bo));
 446	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
 447		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
 448
 449	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 450				 psp->fence_buf_mc_addr);
 451
 452	kfree(cmd);
 453
 454	return ret;
 455}
 456
 457static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
 458					struct psp_gfx_cmd_resp *cmd)
 459{
 460	if (amdgpu_sriov_vf(psp->adev))
 461		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
 462	else
 463		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
 464}
 465
 466static int psp_tmr_unload(struct psp_context *psp)
 467{
 468	int ret;
 469	struct psp_gfx_cmd_resp *cmd;
 470
 471	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 472	if (!cmd)
 473		return -ENOMEM;
 
 
 
 
 474
 475	psp_prep_tmr_unload_cmd_buf(psp, cmd);
 476	DRM_INFO("free PSP TMR buffer\n");
 477
 478	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 479				 psp->fence_buf_mc_addr);
 480
 481	kfree(cmd);
 482
 483	return ret;
 484}
 485
 486static int psp_tmr_terminate(struct psp_context *psp)
 487{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 488	int ret;
 489	void *tmr_buf;
 490	void **pptr;
 491
 492	ret = psp_tmr_unload(psp);
 493	if (ret)
 494		return ret;
 
 
 
 
 
 
 
 
 
 
 495
 496	/* free TMR memory buffer */
 497	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
 498	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 499
 500	return 0;
 501}
 502
 503static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 504				uint64_t asd_mc, uint32_t size)
 505{
 506	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
 507	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
 508	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
 509	cmd->cmd.cmd_load_ta.app_len = size;
 
 
 
 
 510
 511	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
 512	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
 513	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
 
 
 
 
 
 
 
 514}
 515
 516static int psp_asd_load(struct psp_context *psp)
 517{
 518	int ret;
 
 519	struct psp_gfx_cmd_resp *cmd;
 520
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 521	/* If PSP version doesn't match ASD version, asd loading will be failed.
 522	 * add workaround to bypass it for sriov now.
 523	 * TODO: add version check to make it common
 524	 */
 525	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
 526		return 0;
 527
 528	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 529	if (!cmd)
 530		return -ENOMEM;
 531
 532	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 533	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
 534
 535	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
 536				  psp->asd_ucode_size);
 537
 538	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 539				 psp->fence_buf_mc_addr);
 540	if (!ret) {
 541		psp->asd_context.asd_initialized = true;
 542		psp->asd_context.session_id = cmd->resp.session_id;
 543	}
 544
 545	kfree(cmd);
 546
 547	return ret;
 548}
 549
 550static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 551				       uint32_t session_id)
 552{
 553	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
 554	cmd->cmd.cmd_unload_ta.session_id = session_id;
 555}
 556
 557static int psp_asd_unload(struct psp_context *psp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 558{
 559	int ret;
 560	struct psp_gfx_cmd_resp *cmd;
 561
 562	if (amdgpu_sriov_vf(psp->adev))
 563		return 0;
 564
 565	if (!psp->asd_context.asd_initialized)
 566		return 0;
 567
 568	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 569	if (!cmd)
 570		return -ENOMEM;
 571
 572	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
 573
 574	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 575				 psp->fence_buf_mc_addr);
 576	if (!ret)
 577		psp->asd_context.asd_initialized = false;
 578
 579	kfree(cmd);
 580
 581	return ret;
 582}
 583
 584static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 585		uint32_t id, uint32_t value)
 586{
 587	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
 588	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
 589	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
 590}
 591
 592int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 593		uint32_t value)
 594{
 595	struct psp_gfx_cmd_resp *cmd = NULL;
 596	int ret = 0;
 597
 598	if (reg >= PSP_REG_LAST)
 599		return -EINVAL;
 600
 601	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 602	if (!cmd)
 603		return -ENOMEM;
 604
 605	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
 606	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 
 
 
 607
 608	kfree(cmd);
 609	return ret;
 610}
 611
 612static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 613				     uint64_t ta_bin_mc,
 614				     uint32_t ta_bin_size,
 615				     uint64_t ta_shared_mc,
 616				     uint32_t ta_shared_size)
 617{
 618	cmd->cmd_id 				= GFX_CMD_ID_LOAD_TA;
 619	cmd->cmd.cmd_load_ta.app_phy_addr_lo 	= lower_32_bits(ta_bin_mc);
 620	cmd->cmd.cmd_load_ta.app_phy_addr_hi 	= upper_32_bits(ta_bin_mc);
 621	cmd->cmd.cmd_load_ta.app_len 		= ta_bin_size;
 622
 623	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
 624	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
 625	cmd->cmd.cmd_load_ta.cmd_buf_len 	 = ta_shared_size;
 
 
 626}
 627
 628static int psp_xgmi_init_shared_buf(struct psp_context *psp)
 
 629{
 630	int ret;
 631
 632	/*
 633	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
 634	 * physical) for xgmi ta <-> Driver
 635	 */
 636	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
 637				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 638				      &psp->xgmi_context.xgmi_shared_bo,
 639				      &psp->xgmi_context.xgmi_shared_mc_addr,
 640				      &psp->xgmi_context.xgmi_shared_buf);
 641
 642	return ret;
 643}
 644
 645static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 646				       uint32_t ta_cmd_id,
 647				       uint32_t session_id)
 648{
 649	cmd->cmd_id 				= GFX_CMD_ID_INVOKE_CMD;
 650	cmd->cmd.cmd_invoke_cmd.session_id 	= session_id;
 651	cmd->cmd.cmd_invoke_cmd.ta_cmd_id 	= ta_cmd_id;
 652}
 653
 654static int psp_ta_invoke(struct psp_context *psp,
 655		  uint32_t ta_cmd_id,
 656		  uint32_t session_id)
 657{
 658	int ret;
 659	struct psp_gfx_cmd_resp *cmd;
 660
 661	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 662	if (!cmd)
 663		return -ENOMEM;
 664
 665	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
 666
 667	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 668				 psp->fence_buf_mc_addr);
 669
 670	kfree(cmd);
 
 
 671
 672	return ret;
 673}
 674
 675static int psp_xgmi_load(struct psp_context *psp)
 676{
 677	int ret;
 678	struct psp_gfx_cmd_resp *cmd;
 679
 680	/*
 681	 * TODO: bypass the loading in sriov for now
 682	 */
 683
 684	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 685	if (!cmd)
 686		return -ENOMEM;
 687
 688	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 689	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
 690
 691	psp_prep_ta_load_cmd_buf(cmd,
 692				 psp->fw_pri_mc_addr,
 693				 psp->ta_xgmi_ucode_size,
 694				 psp->xgmi_context.xgmi_shared_mc_addr,
 695				 PSP_XGMI_SHARED_MEM_SIZE);
 696
 697	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 698				 psp->fence_buf_mc_addr);
 699
 700	if (!ret) {
 701		psp->xgmi_context.initialized = 1;
 702		psp->xgmi_context.session_id = cmd->resp.session_id;
 703	}
 704
 705	kfree(cmd);
 706
 707	return ret;
 708}
 709
 710static int psp_xgmi_unload(struct psp_context *psp)
 711{
 712	int ret;
 713	struct psp_gfx_cmd_resp *cmd;
 714	struct amdgpu_device *adev = psp->adev;
 715
 716	/* XGMI TA unload currently is not supported on Arcturus */
 717	if (adev->asic_type == CHIP_ARCTURUS)
 718		return 0;
 719
 720	/*
 721	 * TODO: bypass the unloading in sriov for now
 722	 */
 723
 724	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 725	if (!cmd)
 726		return -ENOMEM;
 727
 728	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
 729
 730	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 731				 psp->fence_buf_mc_addr);
 732
 733	kfree(cmd);
 734
 735	return ret;
 736}
 737
 738int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 739{
 740	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
 741}
 742
 743int psp_xgmi_terminate(struct psp_context *psp)
 744{
 745	int ret;
 
 746
 747	if (!psp->xgmi_context.initialized)
 
 
 
 748		return 0;
 749
 750	ret = psp_xgmi_unload(psp);
 751	if (ret)
 752		return ret;
 753
 754	psp->xgmi_context.initialized = 0;
 755
 756	/* free xgmi shared memory */
 757	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
 758			&psp->xgmi_context.xgmi_shared_mc_addr,
 759			&psp->xgmi_context.xgmi_shared_buf);
 760
 761	return 0;
 762}
 763
 764int psp_xgmi_initialize(struct psp_context *psp)
 765{
 766	struct ta_xgmi_shared_memory *xgmi_cmd;
 767	int ret;
 768
 769	if (!psp->adev->psp.ta_fw ||
 770	    !psp->adev->psp.ta_xgmi_ucode_size ||
 771	    !psp->adev->psp.ta_xgmi_start_addr)
 772		return -ENOENT;
 773
 774	if (!psp->xgmi_context.initialized) {
 775		ret = psp_xgmi_init_shared_buf(psp);
 
 
 
 
 
 
 776		if (ret)
 777			return ret;
 778	}
 779
 780	/* Load XGMI TA */
 781	ret = psp_xgmi_load(psp);
 782	if (ret)
 
 
 783		return ret;
 784
 
 785	/* Initialize XGMI session */
 786	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
 787	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
 788	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
 789
 790	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 
 
 791
 792	return ret;
 793}
 794
 795int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
 796{
 797	struct ta_xgmi_shared_memory *xgmi_cmd;
 798	int ret;
 799
 800	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
 801	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 802
 803	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
 804
 805	/* Invoke xgmi ta to get hive id */
 806	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 807	if (ret)
 808		return ret;
 809
 810	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
 811
 812	return 0;
 813}
 814
 815int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 816{
 817	struct ta_xgmi_shared_memory *xgmi_cmd;
 818	int ret;
 819
 820	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
 821	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 822
 823	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
 824
 825	/* Invoke xgmi ta to get the node id */
 826	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 827	if (ret)
 828		return ret;
 829
 830	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
 831
 832	return 0;
 833}
 834
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 835int psp_xgmi_get_topology_info(struct psp_context *psp,
 836			       int number_devices,
 837			       struct psp_xgmi_topology_info *topology)
 
 838{
 839	struct ta_xgmi_shared_memory *xgmi_cmd;
 840	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
 841	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
 842	int i;
 843	int ret;
 844
 845	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
 846		return -EINVAL;
 847
 848	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
 849	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
 850
 851	/* Fill in the shared memory with topology information as input */
 852	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
 853	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
 854	topology_info_input->num_nodes = number_devices;
 855
 856	for (i = 0; i < topology_info_input->num_nodes; i++) {
 857		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
 858		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
 859		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
 860		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
 861	}
 862
 863	/* Invoke xgmi ta to get the topology information */
 864	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
 865	if (ret)
 866		return ret;
 867
 868	/* Read the output topology information from the shared memory */
 869	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
 870	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
 871	for (i = 0; i < topology->num_nodes; i++) {
 872		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
 873		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
 874		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
 875		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 876	}
 877
 878	return 0;
 879}
 880
 881int psp_xgmi_set_topology_info(struct psp_context *psp,
 882			       int number_devices,
 883			       struct psp_xgmi_topology_info *topology)
 884{
 885	struct ta_xgmi_shared_memory *xgmi_cmd;
 886	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
 887	int i;
 888
 889	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
 890		return -EINVAL;
 891
 892	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
 893	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 894
 895	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
 896	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
 897	topology_info_input->num_nodes = number_devices;
 898
 899	for (i = 0; i < topology_info_input->num_nodes; i++) {
 900		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
 901		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
 902		topology_info_input->nodes[i].is_sharing_enabled = 1;
 903		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
 904	}
 905
 906	/* Invoke xgmi ta to set topology information */
 907	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
 908}
 909
 910// ras begin
 911static int psp_ras_init_shared_buf(struct psp_context *psp)
 912{
 913	int ret;
 
 914
 915	/*
 916	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
 917	 * physical) for ras ta <-> Driver
 918	 */
 919	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
 920			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
 921			&psp->ras.ras_shared_bo,
 922			&psp->ras.ras_shared_mc_addr,
 923			&psp->ras.ras_shared_buf);
 924
 925	return ret;
 926}
 927
 928static int psp_ras_load(struct psp_context *psp)
 929{
 930	int ret;
 931	struct psp_gfx_cmd_resp *cmd;
 932
 933	/*
 934	 * TODO: bypass the loading in sriov for now
 935	 */
 936	if (amdgpu_sriov_vf(psp->adev))
 937		return 0;
 938
 939	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 940	if (!cmd)
 941		return -ENOMEM;
 942
 943	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 944	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
 945
 946	psp_prep_ta_load_cmd_buf(cmd,
 947				 psp->fw_pri_mc_addr,
 948				 psp->ta_ras_ucode_size,
 949				 psp->ras.ras_shared_mc_addr,
 950				 PSP_RAS_SHARED_MEM_SIZE);
 951
 952	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 953			psp->fence_buf_mc_addr);
 954
 955	if (!ret) {
 956		psp->ras.ras_initialized = true;
 957		psp->ras.session_id = cmd->resp.session_id;
 958	}
 959
 960	kfree(cmd);
 961
 962	return ret;
 963}
 964
 965static int psp_ras_unload(struct psp_context *psp)
 966{
 967	int ret;
 968	struct psp_gfx_cmd_resp *cmd;
 969
 970	/*
 971	 * TODO: bypass the unloading in sriov for now
 972	 */
 973	if (amdgpu_sriov_vf(psp->adev))
 974		return 0;
 975
 976	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 977	if (!cmd)
 978		return -ENOMEM;
 979
 980	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
 981
 982	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 983			psp->fence_buf_mc_addr);
 984
 985	kfree(cmd);
 986
 987	return ret;
 988}
 989
 990int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 991{
 992	struct ta_ras_shared_memory *ras_cmd;
 993	int ret;
 994
 995	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
 996
 997	/*
 998	 * TODO: bypass the loading in sriov for now
 999	 */
1000	if (amdgpu_sriov_vf(psp->adev))
1001		return 0;
1002
1003	ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
1004
1005	if (amdgpu_ras_intr_triggered())
1006		return ret;
1007
1008	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
1009	{
1010		DRM_WARN("RAS: Unsupported Interface");
1011		return -EINVAL;
1012	}
1013
1014	if (!ret) {
1015		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1016			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1017
1018			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1019		}
1020		else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1021			dev_warn(psp->adev->dev,
1022				 "RAS internal register access blocked\n");
 
 
1023	}
1024
1025	return ret;
1026}
1027
1028int psp_ras_enable_features(struct psp_context *psp,
1029		union ta_ras_cmd_input *info, bool enable)
1030{
1031	struct ta_ras_shared_memory *ras_cmd;
1032	int ret;
1033
1034	if (!psp->ras.ras_initialized)
1035		return -EINVAL;
1036
1037	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1038	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1039
1040	if (enable)
1041		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1042	else
1043		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1044
1045	ras_cmd->ras_in_message = *info;
1046
1047	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1048	if (ret)
1049		return -EINVAL;
1050
1051	return ras_cmd->ras_status;
1052}
1053
1054static int psp_ras_terminate(struct psp_context *psp)
1055{
1056	int ret;
1057
1058	/*
1059	 * TODO: bypass the terminate in sriov for now
1060	 */
1061	if (amdgpu_sriov_vf(psp->adev))
1062		return 0;
1063
1064	if (!psp->ras.ras_initialized)
1065		return 0;
1066
1067	ret = psp_ras_unload(psp);
1068	if (ret)
1069		return ret;
1070
1071	psp->ras.ras_initialized = false;
1072
1073	/* free ras shared memory */
1074	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
1075			&psp->ras.ras_shared_mc_addr,
1076			&psp->ras.ras_shared_buf);
1077
1078	return 0;
1079}
1080
1081static int psp_ras_initialize(struct psp_context *psp)
1082{
1083	int ret;
 
 
 
1084
1085	/*
1086	 * TODO: bypass the initialize in sriov for now
1087	 */
1088	if (amdgpu_sriov_vf(psp->adev))
1089		return 0;
1090
1091	if (!psp->adev->psp.ta_ras_ucode_size ||
1092	    !psp->adev->psp.ta_ras_start_addr) {
1093		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
1094		return 0;
1095	}
1096
1097	if (!psp->ras.ras_initialized) {
1098		ret = psp_ras_init_shared_buf(psp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1099		if (ret)
1100			return ret;
1101	}
1102
1103	ret = psp_ras_load(psp);
1104	if (ret)
1105		return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1106
1107	return 0;
1108}
1109
1110int psp_ras_trigger_error(struct psp_context *psp,
1111			  struct ta_ras_trigger_error_input *info)
1112{
1113	struct ta_ras_shared_memory *ras_cmd;
 
1114	int ret;
 
1115
1116	if (!psp->ras.ras_initialized)
1117		return -EINVAL;
1118
1119	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1120	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1121
1122	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1123	ras_cmd->ras_in_message.trigger_error = *info;
1124
1125	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1126	if (ret)
1127		return -EINVAL;
1128
1129	/* If err_event_athub occurs error inject was successful, however
1130	   return status from TA is no long reliable */
 
1131	if (amdgpu_ras_intr_triggered())
1132		return 0;
1133
1134	return ras_cmd->ras_status;
 
 
 
 
 
1135}
1136// ras end
1137
1138// HDCP start
1139static int psp_hdcp_init_shared_buf(struct psp_context *psp)
 
1140{
 
1141	int ret;
1142
1143	/*
1144	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1145	 * physical) for hdcp ta <-> Driver
1146	 */
1147	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
1148				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1149				      &psp->hdcp_context.hdcp_shared_bo,
1150				      &psp->hdcp_context.hdcp_shared_mc_addr,
1151				      &psp->hdcp_context.hdcp_shared_buf);
 
 
 
 
 
1152
1153	return ret;
1154}
 
1155
1156static int psp_hdcp_load(struct psp_context *psp)
 
1157{
1158	int ret;
1159	struct psp_gfx_cmd_resp *cmd;
1160
1161	/*
1162	 * TODO: bypass the loading in sriov for now
1163	 */
1164	if (amdgpu_sriov_vf(psp->adev))
1165		return 0;
1166
1167	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1168	if (!cmd)
1169		return -ENOMEM;
1170
1171	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1172	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
1173	       psp->ta_hdcp_ucode_size);
 
 
1174
1175	psp_prep_ta_load_cmd_buf(cmd,
1176				 psp->fw_pri_mc_addr,
1177				 psp->ta_hdcp_ucode_size,
1178				 psp->hdcp_context.hdcp_shared_mc_addr,
1179				 PSP_HDCP_SHARED_MEM_SIZE);
1180
1181	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 
 
 
1182
 
1183	if (!ret) {
1184		psp->hdcp_context.hdcp_initialized = true;
1185		psp->hdcp_context.session_id = cmd->resp.session_id;
1186		mutex_init(&psp->hdcp_context.mutex);
1187	}
1188
1189	kfree(cmd);
 
 
 
 
 
 
 
 
 
 
 
 
1190
1191	return ret;
1192}
1193static int psp_hdcp_initialize(struct psp_context *psp)
 
1194{
1195	int ret;
1196
1197	/*
1198	 * TODO: bypass the initialize in sriov for now
1199	 */
1200	if (amdgpu_sriov_vf(psp->adev))
1201		return 0;
1202
1203	if (!psp->adev->psp.ta_hdcp_ucode_size ||
1204	    !psp->adev->psp.ta_hdcp_start_addr) {
1205		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1206		return 0;
1207	}
1208
1209	if (!psp->hdcp_context.hdcp_initialized) {
1210		ret = psp_hdcp_init_shared_buf(psp);
1211		if (ret)
1212			return ret;
1213	}
1214
1215	ret = psp_hdcp_load(psp);
1216	if (ret)
1217		return ret;
1218
1219	return 0;
1220}
 
1221
1222static int psp_hdcp_unload(struct psp_context *psp)
 
1223{
1224	int ret;
1225	struct psp_gfx_cmd_resp *cmd;
1226
1227	/*
1228	 * TODO: bypass the unloading in sriov for now
1229	 */
1230	if (amdgpu_sriov_vf(psp->adev))
1231		return 0;
1232
1233	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1234	if (!cmd)
1235		return -ENOMEM;
 
 
 
 
 
 
1236
1237	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
 
1238
1239	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 
 
 
1240
1241	kfree(cmd);
 
 
 
 
1242
1243	return ret;
1244}
1245
1246int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1247{
1248	/*
1249	 * TODO: bypass the loading in sriov for now
1250	 */
1251	if (amdgpu_sriov_vf(psp->adev))
1252		return 0;
1253
1254	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
 
 
 
1255}
1256
1257static int psp_hdcp_terminate(struct psp_context *psp)
1258{
1259	int ret;
1260
1261	/*
1262	 * TODO: bypass the terminate in sriov for now
1263	 */
1264	if (amdgpu_sriov_vf(psp->adev))
1265		return 0;
1266
1267	if (!psp->hdcp_context.hdcp_initialized)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1268		return 0;
 
 
 
 
1269
1270	ret = psp_hdcp_unload(psp);
1271	if (ret)
 
 
 
 
 
 
 
 
 
1272		return ret;
1273
1274	psp->hdcp_context.hdcp_initialized = false;
 
 
 
 
1275
1276	/* free hdcp shared memory */
1277	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
1278			      &psp->hdcp_context.hdcp_shared_mc_addr,
1279			      &psp->hdcp_context.hdcp_shared_buf);
 
1280
1281	return 0;
1282}
1283// HDCP end
1284
1285// DTM start
1286static int psp_dtm_init_shared_buf(struct psp_context *psp)
1287{
1288	int ret;
1289
1290	/*
1291	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1292	 * physical) for dtm ta <-> Driver
1293	 */
1294	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
1295				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1296				      &psp->dtm_context.dtm_shared_bo,
1297				      &psp->dtm_context.dtm_shared_mc_addr,
1298				      &psp->dtm_context.dtm_shared_buf);
1299
1300	return ret;
1301}
1302
1303static int psp_dtm_load(struct psp_context *psp)
1304{
1305	int ret;
1306	struct psp_gfx_cmd_resp *cmd;
1307
1308	/*
1309	 * TODO: bypass the loading in sriov for now
1310	 */
1311	if (amdgpu_sriov_vf(psp->adev))
1312		return 0;
1313
1314	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1315	if (!cmd)
1316		return -ENOMEM;
 
 
1317
1318	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1319	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
 
1320
1321	psp_prep_ta_load_cmd_buf(cmd,
1322				 psp->fw_pri_mc_addr,
1323				 psp->ta_dtm_ucode_size,
1324				 psp->dtm_context.dtm_shared_mc_addr,
1325				 PSP_DTM_SHARED_MEM_SIZE);
1326
1327	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 
1328
1329	if (!ret) {
1330		psp->dtm_context.dtm_initialized = true;
1331		psp->dtm_context.session_id = cmd->resp.session_id;
1332		mutex_init(&psp->dtm_context.mutex);
1333	}
1334
1335	kfree(cmd);
 
1336
1337	return ret;
1338}
 
1339
1340static int psp_dtm_initialize(struct psp_context *psp)
 
1341{
1342	int ret;
 
1343
1344	/*
1345	 * TODO: bypass the initialize in sriov for now
1346	 */
1347	if (amdgpu_sriov_vf(psp->adev))
1348		return 0;
1349
1350	if (!psp->adev->psp.ta_dtm_ucode_size ||
1351	    !psp->adev->psp.ta_dtm_start_addr) {
1352		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
 
 
 
 
1353		return 0;
1354	}
1355
1356	if (!psp->dtm_context.dtm_initialized) {
1357		ret = psp_dtm_init_shared_buf(psp);
 
 
 
 
 
1358		if (ret)
1359			return ret;
1360	}
1361
1362	ret = psp_dtm_load(psp);
1363	if (ret)
 
 
 
1364		return ret;
1365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1366	return 0;
1367}
1368
1369static int psp_dtm_unload(struct psp_context *psp)
1370{
1371	int ret;
1372	struct psp_gfx_cmd_resp *cmd;
1373
1374	/*
1375	 * TODO: bypass the unloading in sriov for now
1376	 */
1377	if (amdgpu_sriov_vf(psp->adev))
1378		return 0;
1379
1380	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1381	if (!cmd)
1382		return -ENOMEM;
1383
1384	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
1385
1386	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1387
1388	kfree(cmd);
1389
1390	return ret;
1391}
1392
1393int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1394{
1395	/*
1396	 * TODO: bypass the loading in sriov for now
1397	 */
1398	if (amdgpu_sriov_vf(psp->adev))
1399		return 0;
1400
1401	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
1402}
1403
1404static int psp_dtm_terminate(struct psp_context *psp)
1405{
1406	int ret;
1407
1408	/*
1409	 * TODO: bypass the terminate in sriov for now
1410	 */
1411	if (amdgpu_sriov_vf(psp->adev))
1412		return 0;
1413
1414	if (!psp->dtm_context.dtm_initialized)
1415		return 0;
 
1416
1417	ret = psp_dtm_unload(psp);
1418	if (ret)
1419		return ret;
 
1420
1421	psp->dtm_context.dtm_initialized = false;
 
1422
1423	/* free hdcp shared memory */
1424	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
1425			      &psp->dtm_context.dtm_shared_mc_addr,
1426			      &psp->dtm_context.dtm_shared_buf);
1427
1428	return 0;
 
 
 
 
 
 
 
1429}
1430// DTM end
1431
1432static int psp_hw_start(struct psp_context *psp)
1433{
1434	struct amdgpu_device *adev = psp->adev;
1435	int ret;
1436
1437	if (!amdgpu_sriov_vf(adev)) {
1438		if (psp->kdb_bin_size &&
1439		    (psp->funcs->bootloader_load_kdb != NULL)) {
1440			ret = psp_bootloader_load_kdb(psp);
1441			if (ret) {
1442				DRM_ERROR("PSP load kdb failed!\n");
1443				return ret;
1444			}
1445		}
1446
1447		if (psp->spl_bin_size) {
 
1448			ret = psp_bootloader_load_spl(psp);
1449			if (ret) {
1450				DRM_ERROR("PSP load spl failed!\n");
1451				return ret;
1452			}
1453		}
1454
1455		ret = psp_bootloader_load_sysdrv(psp);
1456		if (ret) {
1457			DRM_ERROR("PSP load sysdrv failed!\n");
1458			return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1459		}
1460
1461		ret = psp_bootloader_load_sos(psp);
1462		if (ret) {
1463			DRM_ERROR("PSP load sos failed!\n");
1464			return ret;
 
 
 
1465		}
1466	}
1467
1468	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
1469	if (ret) {
1470		DRM_ERROR("PSP create ring failed!\n");
1471		return ret;
1472	}
1473
1474	ret = psp_clear_vf_fw(psp);
1475	if (ret) {
1476		DRM_ERROR("PSP clear vf fw!\n");
1477		return ret;
1478	}
1479
1480	ret = psp_tmr_init(psp);
1481	if (ret) {
1482		DRM_ERROR("PSP tmr init failed!\n");
1483		return ret;
1484	}
1485
1486	/*
1487	 * For ASICs with DF Cstate management centralized
1488	 * to PMFW, TMR setup should be performed after PMFW
1489	 * loaded and before other non-psp firmware loaded.
1490	 */
1491	if (psp->pmfw_centralized_cstate_management) {
1492		ret = psp_load_smu_fw(psp);
1493		if (ret)
1494			return ret;
1495	}
1496
1497	ret = psp_tmr_load(psp);
1498	if (ret) {
1499		DRM_ERROR("PSP load tmr failed!\n");
1500		return ret;
1501	}
1502
1503	return 0;
1504}
1505
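/*
 * Map an amdgpu ucode id to the PSP firmware type carried in the
 * GFX_CMD_ID_LOAD_IP_FW command; unknown ids return -EINVAL.
 */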
1506static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1507			   enum psp_gfx_fw_type *type)
1508{
1509	switch (ucode->ucode_id) {
1510	case AMDGPU_UCODE_ID_SDMA0:
1511		*type = GFX_FW_TYPE_SDMA0;
1512		break;
1513	case AMDGPU_UCODE_ID_SDMA1:
1514		*type = GFX_FW_TYPE_SDMA1;
1515		break;
1516	case AMDGPU_UCODE_ID_SDMA2:
1517		*type = GFX_FW_TYPE_SDMA2;
1518		break;
1519	case AMDGPU_UCODE_ID_SDMA3:
1520		*type = GFX_FW_TYPE_SDMA3;
1521		break;
1522	case AMDGPU_UCODE_ID_SDMA4:
1523		*type = GFX_FW_TYPE_SDMA4;
1524		break;
1525	case AMDGPU_UCODE_ID_SDMA5:
1526		*type = GFX_FW_TYPE_SDMA5;
1527		break;
1528	case AMDGPU_UCODE_ID_SDMA6:
1529		*type = GFX_FW_TYPE_SDMA6;
1530		break;
1531	case AMDGPU_UCODE_ID_SDMA7:
1532		*type = GFX_FW_TYPE_SDMA7;
1533		break;
1534	case AMDGPU_UCODE_ID_CP_MES:
1535		*type = GFX_FW_TYPE_CP_MES;
1536		break;
1537	case AMDGPU_UCODE_ID_CP_MES_DATA:
1538		*type = GFX_FW_TYPE_MES_STACK;
1539		break;
1540	case AMDGPU_UCODE_ID_CP_CE:
1541		*type = GFX_FW_TYPE_CP_CE;
1542		break;
1543	case AMDGPU_UCODE_ID_CP_PFP:
1544		*type = GFX_FW_TYPE_CP_PFP;
1545		break;
1546	case AMDGPU_UCODE_ID_CP_ME:
1547		*type = GFX_FW_TYPE_CP_ME;
1548		break;
1549	case AMDGPU_UCODE_ID_CP_MEC1:
1550		*type = GFX_FW_TYPE_CP_MEC;
1551		break;
1552	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1553		*type = GFX_FW_TYPE_CP_MEC_ME1;
1554		break;
1555	case AMDGPU_UCODE_ID_CP_MEC2:
1556		*type = GFX_FW_TYPE_CP_MEC;
1557		break;
1558	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1559		*type = GFX_FW_TYPE_CP_MEC_ME2;
1560		break;
1561	case AMDGPU_UCODE_ID_RLC_G:
1562		*type = GFX_FW_TYPE_RLC_G;
1563		break;
1564	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
1565		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
1566		break;
1567	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
1568		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
1569		break;
1570	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1571		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1572		break;
1573	case AMDGPU_UCODE_ID_SMC:
1574		*type = GFX_FW_TYPE_SMU;
1575		break;
1576	case AMDGPU_UCODE_ID_UVD:
1577		*type = GFX_FW_TYPE_UVD;
1578		break;
1579	case AMDGPU_UCODE_ID_UVD1:
1580		*type = GFX_FW_TYPE_UVD1;
1581		break;
1582	case AMDGPU_UCODE_ID_VCE:
1583		*type = GFX_FW_TYPE_VCE;
1584		break;
1585	case AMDGPU_UCODE_ID_VCN:
1586		*type = GFX_FW_TYPE_VCN;
1587		break;
1588	case AMDGPU_UCODE_ID_VCN1:
1589		*type = GFX_FW_TYPE_VCN1;
1590		break;
1591	case AMDGPU_UCODE_ID_DMCU_ERAM:
1592		*type = GFX_FW_TYPE_DMCU_ERAM;
1593		break;
1594	case AMDGPU_UCODE_ID_DMCU_INTV:
1595		*type = GFX_FW_TYPE_DMCU_ISR;
1596		break;
1597	case AMDGPU_UCODE_ID_VCN0_RAM:
1598		*type = GFX_FW_TYPE_VCN0_RAM;
1599		break;
1600	case AMDGPU_UCODE_ID_VCN1_RAM:
1601		*type = GFX_FW_TYPE_VCN1_RAM;
1602		break;
1603	case AMDGPU_UCODE_ID_DMCUB:
1604		*type = GFX_FW_TYPE_DMUB;
1605		break;
1606	case AMDGPU_UCODE_ID_MAXIMUM:
1607	default:
1608		return -EINVAL;
1609	}
1610
1611	return 0;
1612}
1613
1614static void psp_print_fw_hdr(struct psp_context *psp,
1615			     struct amdgpu_firmware_info *ucode)
1616{
1617	struct amdgpu_device *adev = psp->adev;
1618	struct common_firmware_header *hdr;
1619
1620	switch (ucode->ucode_id) {
1621	case AMDGPU_UCODE_ID_SDMA0:
1622	case AMDGPU_UCODE_ID_SDMA1:
1623	case AMDGPU_UCODE_ID_SDMA2:
1624	case AMDGPU_UCODE_ID_SDMA3:
1625	case AMDGPU_UCODE_ID_SDMA4:
1626	case AMDGPU_UCODE_ID_SDMA5:
1627	case AMDGPU_UCODE_ID_SDMA6:
1628	case AMDGPU_UCODE_ID_SDMA7:
1629		hdr = (struct common_firmware_header *)
1630			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
1631		amdgpu_ucode_print_sdma_hdr(hdr);
1632		break;
1633	case AMDGPU_UCODE_ID_CP_CE:
1634		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
1635		amdgpu_ucode_print_gfx_hdr(hdr);
1636		break;
1637	case AMDGPU_UCODE_ID_CP_PFP:
1638		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
1639		amdgpu_ucode_print_gfx_hdr(hdr);
1640		break;
1641	case AMDGPU_UCODE_ID_CP_ME:
1642		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
1643		amdgpu_ucode_print_gfx_hdr(hdr);
1644		break;
1645	case AMDGPU_UCODE_ID_CP_MEC1:
1646		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
1647		amdgpu_ucode_print_gfx_hdr(hdr);
1648		break;
1649	case AMDGPU_UCODE_ID_RLC_G:
1650		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
1651		amdgpu_ucode_print_rlc_hdr(hdr);
1652		break;
1653	case AMDGPU_UCODE_ID_SMC:
1654		hdr = (struct common_firmware_header *)adev->pm.fw->data;
1655		amdgpu_ucode_print_smc_hdr(hdr);
1656		break;
1657	default:
1658		break;
1659	}
1660}
1661
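/*
 * Build a GFX_CMD_ID_LOAD_IP_FW command for the given ucode: the
 * firmware MC address is split into low/high 32-bit halves and the
 * size and PSP firmware type are filled in.
 */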
1662static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
1663				       struct psp_gfx_cmd_resp *cmd)
1664{
1665	int ret;
1666	uint64_t fw_mem_mc_addr = ucode->mc_addr;
1667
1668	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
1669
1670	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1671	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
1672	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
1673	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
1674
1675	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
1676	if (ret)
1677		DRM_ERROR("Unknown firmware type\n");
1678
1679	return ret;
1680}
1681
1682static int psp_execute_np_fw_load(struct psp_context *psp,
1683			          struct amdgpu_firmware_info *ucode)
1684{
1685	int ret = 0;
1686
1687	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
1688	if (ret)
1689		return ret;
1690
1691	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
1692				 psp->fence_buf_mc_addr);
1693
1694	return ret;
1695}
1696
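/*
 * Load the SMU (PMFW) image through PSP.  Skipped on SRIOV VFs and when
 * no SMC ucode is present.  During a GPU reset on RAS-capable parts,
 * MP1 is put into the UNLOAD state before the firmware is reloaded.
 */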
1697static int psp_load_smu_fw(struct psp_context *psp)
1698{
1699	int ret;
1700	struct amdgpu_device *adev = psp->adev;
1701	struct amdgpu_firmware_info *ucode =
1702			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
1703	struct amdgpu_ras *ras = psp->ras.ras;
1704
1705	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
1706		return 0;
1707
1708
1709	if (adev->in_gpu_reset && ras && ras->supported) {
1710		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
1711		if (ret) {
1712			DRM_WARN("Failed to set MP1 state to prepare for reload\n");
1713		}
1714	}
1715
1716	ret = psp_execute_np_fw_load(psp, ucode);
1717
1718	if (ret)
1719		DRM_ERROR("PSP load smu failed!\n");
1720
1721	return ret;
1722}
1723
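/*
 * Return true when the ucode should not be loaded through PSP: missing
 * firmware, SMC images handled elsewhere (reload quirk, autoload or
 * PMFW-centralized Cstate management), SDMA/RLC/SMC images on SRIOV
 * VFs, and MEC JT images when RLC autoload is enabled.
 */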
1724static bool fw_load_skip_check(struct psp_context *psp,
1725			       struct amdgpu_firmware_info *ucode)
1726{
1727	if (!ucode->fw)
1728		return true;
1729
1730	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1731	    (psp_smu_reload_quirk(psp) ||
1732	     psp->autoload_supported ||
1733	     psp->pmfw_centralized_cstate_management))
1734		return true;
1735
1736	if (amdgpu_sriov_vf(psp->adev) &&
1737	   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
1738	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
1739	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
1740	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
1741	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
1742	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
1743	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
1744	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
1745	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
1746	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1747	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1748	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1749	    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
1750		/* skip ucode loading in SRIOV VF */
1751		return true;
1752
1753	if (psp->autoload_supported &&
1754	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
1755	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
1756		/* skip mec JT when autoload is enabled */
1757		return true;
1758
1759	return false;
1760}
1761
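/*
 * Load all non-PSP firmware images: SMU firmware may go first when
 * autoload is supported, then every entry in adev->firmware.ucode[]
 * that is not skipped is submitted to PSP.  RLC autoload is kicked off
 * once the last GFX firmware has been received by PSP.
 */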
1762static int psp_np_fw_load(struct psp_context *psp)
1763{
1764	int i, ret;
1765	struct amdgpu_firmware_info *ucode;
1766	struct amdgpu_device *adev = psp->adev;
1767
1768	if (psp->autoload_supported &&
1769	    !psp->pmfw_centralized_cstate_management) {
1770		ret = psp_load_smu_fw(psp);
1771		if (ret)
1772			return ret;
1773	}
1774
1775	for (i = 0; i < adev->firmware.max_ucodes; i++) {
1776		ucode = &adev->firmware.ucode[i];
1777
1778		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1779		    !fw_load_skip_check(psp, ucode)) {
1780			ret = psp_load_smu_fw(psp);
1781			if (ret)
1782				return ret;
1783			continue;
1784		}
1785
1786		if (fw_load_skip_check(psp, ucode))
1787			continue;
1788
1789		if (psp->autoload_supported &&
1790		    (adev->asic_type == CHIP_SIENNA_CICHLID ||
1791		     adev->asic_type == CHIP_NAVY_FLOUNDER) &&
1792		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
1793		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
1794		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
1795			/* PSP only receives one SDMA firmware for sienna_cichlid,
1796			 * as all four SDMA firmwares are the same */
1797			continue;
1798
1799		psp_print_fw_hdr(psp, ucode);
1800
1801		ret = psp_execute_np_fw_load(psp, ucode);
1802		if (ret)
1803			return ret;
1804
1805		/* Start RLC autoload after PSP has received all the GFX firmware */
1806		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
1807		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
1808			ret = psp_rlc_autoload_start(psp);
1809			if (ret) {
1810				DRM_ERROR("Failed to start rlc autoload\n");
1811				return ret;
1812			}
1813		}
1814	}
1815
1816	return 0;
1817}
1818
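/*
 * Top-level PSP firmware load: allocate the private firmware, fence and
 * command buffers, initialize and start the KM ring, then load the
 * non-PSP firmware, the ASD and the optional RAS/HDCP/DTM TAs.  On an
 * SRIOV VF in GPU reset the existing buffers are reused and only the
 * ring is restarted.
 */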
1819static int psp_load_fw(struct amdgpu_device *adev)
1820{
1821	int ret;
1822	struct psp_context *psp = &adev->psp;
1823
1824	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
1825		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
1826		goto skip_memalloc;
1827	}
1828
1829	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1830	if (!psp->cmd)
1831		return -ENOMEM;
1832
1833	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
1834					AMDGPU_GEM_DOMAIN_GTT,
1835					&psp->fw_pri_bo,
1836					&psp->fw_pri_mc_addr,
1837					&psp->fw_pri_buf);
1838	if (ret)
1839		goto failed;
1840
1841	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
1842					AMDGPU_GEM_DOMAIN_VRAM,
1843					&psp->fence_buf_bo,
1844					&psp->fence_buf_mc_addr,
1845					&psp->fence_buf);
1846	if (ret)
1847		goto failed;
1848
1849	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
1850				      AMDGPU_GEM_DOMAIN_VRAM,
1851				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1852				      (void **)&psp->cmd_buf_mem);
1853	if (ret)
1854		goto failed;
1855
1856	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
1857
1858	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
1859	if (ret) {
1860		DRM_ERROR("PSP ring init failed!\n");
1861		goto failed;
1862	}
1863
1864skip_memalloc:
1865	ret = psp_hw_start(psp);
1866	if (ret)
1867		goto failed;
1868
1869	ret = psp_np_fw_load(psp);
1870	if (ret)
1871		goto failed;
1872
1873	ret = psp_asd_load(psp);
1874	if (ret) {
1875		DRM_ERROR("PSP load asd failed!\n");
1876		return ret;
1877	}
1878
1879	if (psp->adev->psp.ta_fw) {
1880		ret = psp_ras_initialize(psp);
1881		if (ret)
1882			dev_err(psp->adev->dev,
1883					"RAS: Failed to initialize RAS\n");
1884
1885		ret = psp_hdcp_initialize(psp);
1886		if (ret)
1887			dev_err(psp->adev->dev,
1888				"HDCP: Failed to initialize HDCP\n");
1889
1890		ret = psp_dtm_initialize(psp);
1891		if (ret)
1892			dev_err(psp->adev->dev,
1893				"DTM: Failed to initialize DTM\n");
1894	}
1895
1896	return 0;
1897
1898failed:
1899	/*
1900	 * all cleanup jobs (xgmi terminate, ras terminate,
1901	 * ring destroy, cmd/fence/fw buffers destroy,
1902	 * psp->cmd destroy) are delayed to psp_hw_fini
1903	 */
1904	return ret;
1905}
1906
1907static int psp_hw_init(void *handle)
1908{
1909	int ret;
1910	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1911
1912	mutex_lock(&adev->firmware.mutex);
1913	/*
1914	 * This sequence is just used on hw_init only once, no need on
1915	 * resume.
1916	 */
1917	ret = amdgpu_ucode_init_bo(adev);
1918	if (ret)
1919		goto failed;
1920
1921	ret = psp_load_fw(adev);
1922	if (ret) {
1923		DRM_ERROR("PSP firmware loading failed\n");
1924		goto failed;
1925	}
1926
1927	mutex_unlock(&adev->firmware.mutex);
1928	return 0;
1929
1930failed:
1931	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1932	mutex_unlock(&adev->firmware.mutex);
1933	return -EINVAL;
1934}
1935
1936static int psp_hw_fini(void *handle)
1937{
1938	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1939	struct psp_context *psp = &adev->psp;
1940	int ret;
1941
1942	if (psp->adev->psp.ta_fw) {
1943		psp_ras_terminate(psp);
1944		psp_dtm_terminate(psp);
1945		psp_hdcp_terminate(psp);
1946	}
1947
1948	psp_asd_unload(psp);
1949	ret = psp_clear_vf_fw(psp);
1950	if (ret) {
1951		DRM_ERROR("PSP clear vf fw failed!\n");
1952		return ret;
1953	}
1954
1955	psp_tmr_terminate(psp);
1956	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
1957
1958	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
1959			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
1960	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
1961			      &psp->fence_buf_mc_addr, &psp->fence_buf);
1962	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1963			      (void **)&psp->cmd_buf_mem);
1964
1965	kfree(psp->cmd);
1966	psp->cmd = NULL;
1967
1968	return 0;
1969}
1970
1971static int psp_suspend(void *handle)
1972{
1973	int ret;
1974	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1975	struct psp_context *psp = &adev->psp;
1976
1977	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1978	    psp->xgmi_context.initialized == 1) {
1979		ret = psp_xgmi_terminate(psp);
1980		if (ret) {
1981			DRM_ERROR("Failed to terminate xgmi ta\n");
1982			return ret;
1983		}
1984	}
1985
1986	if (psp->adev->psp.ta_fw) {
1987		ret = psp_ras_terminate(psp);
1988		if (ret) {
1989			DRM_ERROR("Failed to terminate ras ta\n");
1990			return ret;
1991		}
1992		ret = psp_hdcp_terminate(psp);
1993		if (ret) {
1994			DRM_ERROR("Failed to terminate hdcp ta\n");
1995			return ret;
1996		}
1997		ret = psp_dtm_terminate(psp);
1998		if (ret) {
1999			DRM_ERROR("Failed to terminate dtm ta\n");
2000			return ret;
2001		}
2002	}
2003
2004	ret = psp_asd_unload(psp);
2005	if (ret) {
2006		DRM_ERROR("Failed to unload asd\n");
2007		return ret;
2008	}
2009
2010	ret = psp_tmr_terminate(psp);
2011	if (ret) {
2012		DRM_ERROR("Failed to terminate tmr\n");
2013		return ret;
2014	}
2015
2016	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2017	if (ret) {
2018		DRM_ERROR("PSP ring stop failed\n");
2019		return ret;
2020	}
2021
2022	return 0;
2023}
2024
2025static int psp_resume(void *handle)
2026{
2027	int ret;
2028	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2029	struct psp_context *psp = &adev->psp;
2030
2031	DRM_INFO("PSP is resuming...\n");
2032
2033	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2034	if (ret) {
2035		DRM_ERROR("Failed to process memory training!\n");
2036		return ret;
2037	}
2038
2039	mutex_lock(&adev->firmware.mutex);
2040
2041	ret = psp_hw_start(psp);
2042	if (ret)
2043		goto failed;
2044
2045	ret = psp_np_fw_load(psp);
2046	if (ret)
2047		goto failed;
2048
2049	ret = psp_asd_load(psp);
2050	if (ret) {
2051		DRM_ERROR("PSP load asd failed!\n");
2052		goto failed;
2053	}
2054
2055	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2056		ret = psp_xgmi_initialize(psp);
2057		/* Warn about XGMI session initialization failure
2058		 * instead of stopping driver initialization
2059		 */
2060		if (ret)
2061			dev_err(psp->adev->dev,
2062				"XGMI: Failed to initialize XGMI session\n");
2063	}
2064
2065	if (psp->adev->psp.ta_fw) {
2066		ret = psp_ras_initialize(psp);
2067		if (ret)
2068			dev_err(psp->adev->dev,
2069					"RAS: Failed to initialize RAS\n");
2070
2071		ret = psp_hdcp_initialize(psp);
2072		if (ret)
2073			dev_err(psp->adev->dev,
2074				"HDCP: Failed to initialize HDCP\n");
2075
2076		ret = psp_dtm_initialize(psp);
2077		if (ret)
2078			dev_err(psp->adev->dev,
2079				"DTM: Failed to initialize DTM\n");
2080	}
2081
2082	mutex_unlock(&adev->firmware.mutex);
2083
2084	return 0;
2085
2086failed:
2087	DRM_ERROR("PSP resume failed\n");
2088	mutex_unlock(&adev->firmware.mutex);
2089	return ret;
2090}
2091
2092int psp_gpu_reset(struct amdgpu_device *adev)
2093{
2094	int ret;
2095
2096	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2097		return 0;
2098
2099	mutex_lock(&adev->psp.mutex);
2100	ret = psp_mode1_reset(&adev->psp);
2101	mutex_unlock(&adev->psp.mutex);
2102
2103	return ret;
2104}
2105
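/* Ask PSP to kick off RLC autoload via GFX_CMD_ID_AUTOLOAD_RLC. */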
2106int psp_rlc_autoload_start(struct psp_context *psp)
2107{
2108	int ret;
2109	struct psp_gfx_cmd_resp *cmd;
2110
2111	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
2112	if (!cmd)
2113		return -ENOMEM;
2114
2115	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2116
2117	ret = psp_cmd_submit_buf(psp, NULL, cmd,
2118				 psp->fence_buf_mc_addr);
2119	kfree(cmd);
2120	return ret;
2121}
2122
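/*
 * Program VCN instance SRAM: the caller's command buffer is submitted
 * to PSP as a VCN0_RAM/VCN1_RAM load, selected by inst_idx.
 */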
2123int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
2124			uint64_t cmd_gpu_addr, int cmd_size)
2125{
2126	struct amdgpu_firmware_info ucode = {0};
2127
2128	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
2129		AMDGPU_UCODE_ID_VCN0_RAM;
2130	ucode.mc_addr = cmd_gpu_addr;
2131	ucode.ucode_size = cmd_size;
2132
2133	return psp_execute_np_fw_load(&adev->psp, &ucode);
2134}
2135
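/*
 * Write one GPCOM frame into the KM ring at the current write pointer
 * (tracked in DWORDs), wrapping at the end of the ring, then advance
 * the write pointer by one frame.  The index argument is stored as the
 * frame's fence value.
 */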
2136int psp_ring_cmd_submit(struct psp_context *psp,
2137			uint64_t cmd_buf_mc_addr,
2138			uint64_t fence_mc_addr,
2139			int index)
2140{
2141	unsigned int psp_write_ptr_reg = 0;
2142	struct psp_gfx_rb_frame *write_frame;
2143	struct psp_ring *ring = &psp->km_ring;
2144	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2145	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2146		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2147	struct amdgpu_device *adev = psp->adev;
2148	uint32_t ring_size_dw = ring->ring_size / 4;
2149	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2150
2151	/* KM (GPCOM) prepare write pointer */
2152	psp_write_ptr_reg = psp_ring_get_wptr(psp);
2153
2154	/* Update KM RB frame pointer to new frame */
2155	/* write_frame ptr increments by size of rb_frame in bytes */
2156	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
2157	if ((psp_write_ptr_reg % ring_size_dw) == 0)
2158		write_frame = ring_buffer_start;
2159	else
2160		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2161	/* Check invalid write_frame ptr address */
2162	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2163		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2164			  ring_buffer_start, ring_buffer_end, write_frame);
2165		DRM_ERROR("write_frame is pointing to an address out of bounds\n");
2166		return -EINVAL;
2167	}
2168
2169	/* Initialize KM RB frame */
2170	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2171
2172	/* Update KM RB frame */
2173	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2174	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2175	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2176	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2177	write_frame->fence_value = index;
2178	amdgpu_asic_flush_hdp(adev, NULL);
2179
2180	/* Update the write Pointer in DWORDs */
2181	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2182	psp_ring_set_wptr(psp, psp_write_ptr_reg);
2183	return 0;
2184}
2185
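/*
 * Request and validate amdgpu/<chip>_asd.bin and cache its version,
 * feature version, size and start address in the psp context.
 */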
2186int psp_init_asd_microcode(struct psp_context *psp,
2187			   const char *chip_name)
2188{
2189	struct amdgpu_device *adev = psp->adev;
2190	char fw_name[30];
2191	const struct psp_firmware_header_v1_0 *asd_hdr;
2192	int err = 0;
2193
2194	if (!chip_name) {
2195		dev_err(adev->dev, "invalid chip name for asd microcode\n");
2196		return -EINVAL;
2197	}
2198
2199	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
2200	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
2201	if (err)
2202		goto out;
2203
2204	err = amdgpu_ucode_validate(adev->psp.asd_fw);
2205	if (err)
2206		goto out;
2207
2208	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
2209	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
2210	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
2211	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
2212	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
2213				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
2214	return 0;
2215out:
2216	dev_err(adev->dev, "failed to initialize asd microcode\n");
2217	release_firmware(adev->psp.asd_fw);
2218	adev->psp.asd_fw = NULL;
2219	return err;
2220}
2221
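/*
 * Parse amdgpu/<chip>_sos.bin: depending on the v1.x header minor
 * version, the SYS_DRV/SOS images may be accompanied by TOC, KDB and
 * SPL images whose sizes and start addresses are cached in the psp
 * context.
 */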
2222int psp_init_sos_microcode(struct psp_context *psp,
2223			   const char *chip_name)
2224{
2225	struct amdgpu_device *adev = psp->adev;
2226	char fw_name[30];
2227	const struct psp_firmware_header_v1_0 *sos_hdr;
2228	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
2229	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
2230	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
2231	int err = 0;
2232
2233	if (!chip_name) {
2234		dev_err(adev->dev, "invalid chip name for sos microcode\n");
2235		return -EINVAL;
2236	}
2237
2238	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
2239	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
2240	if (err)
2241		goto out;
2242
2243	err = amdgpu_ucode_validate(adev->psp.sos_fw);
2244	if (err)
2245		goto out;
2246
2247	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
2248	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
2249
2250	switch (sos_hdr->header.header_version_major) {
2251	case 1:
2252		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
2253		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
2254		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
2255		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
2256		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
2257				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
2258		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2259				le32_to_cpu(sos_hdr->sos_offset_bytes);
2260		if (sos_hdr->header.header_version_minor == 1) {
2261			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
2262			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
2263			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2264					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
2265			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
2266			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2267					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
2268		}
2269		if (sos_hdr->header.header_version_minor == 2) {
2270			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
2271			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
2272			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2273						    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
2274		}
2275		if (sos_hdr->header.header_version_minor == 3) {
2276			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
2277			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
2278			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2279				le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
2280			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
2281			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2282				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
2283			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
2284			adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2285				le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
2286		}
2287		break;
2288	default:
2289		dev_err(adev->dev,
2290			"unsupported psp sos firmware\n");
2291		err = -EINVAL;
2292		goto out;
2293	}
2294
2295	return 0;
2296out:
2297	dev_err(adev->dev,
2298		"failed to init sos firmware\n");
2299	release_firmware(adev->psp.sos_fw);
2300	adev->psp.sos_fw = NULL;
2301
2302	return err;
2303}
2304
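/*
 * Record the version, size and start address of one packed TA (ASD,
 * XGMI, RAS, HDCP or DTM) from a v2.0 TA firmware header; unknown TA
 * types only trigger a warning.
 */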
2305int parse_ta_bin_descriptor(struct psp_context *psp,
2306			    const struct ta_fw_bin_desc *desc,
2307			    const struct ta_firmware_header_v2_0 *ta_hdr)
2308{
2309	uint8_t *ucode_start_addr  = NULL;
2310
2311	if (!psp || !desc || !ta_hdr)
2312		return -EINVAL;
2313
2314	ucode_start_addr  = (uint8_t *)ta_hdr +
2315			    le32_to_cpu(desc->offset_bytes) +
2316			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
2317
2318	switch (desc->fw_type) {
2319	case TA_FW_TYPE_PSP_ASD:
2320		psp->asd_fw_version 	   = le32_to_cpu(desc->fw_version);
2321		psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
2322		psp->asd_ucode_size 	   = le32_to_cpu(desc->size_bytes);
2323		psp->asd_start_addr 	   = ucode_start_addr;
2324		break;
2325	case TA_FW_TYPE_PSP_XGMI:
2326		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
2327		psp->ta_xgmi_ucode_size    = le32_to_cpu(desc->size_bytes);
2328		psp->ta_xgmi_start_addr    = ucode_start_addr;
2329		break;
2330	case TA_FW_TYPE_PSP_RAS:
2331		psp->ta_ras_ucode_version  = le32_to_cpu(desc->fw_version);
2332		psp->ta_ras_ucode_size     = le32_to_cpu(desc->size_bytes);
2333		psp->ta_ras_start_addr     = ucode_start_addr;
2334		break;
2335	case TA_FW_TYPE_PSP_HDCP:
2336		psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
2337		psp->ta_hdcp_ucode_size    = le32_to_cpu(desc->size_bytes);
2338		psp->ta_hdcp_start_addr    = ucode_start_addr;
2339		break;
2340	case TA_FW_TYPE_PSP_DTM:
2341		psp->ta_dtm_ucode_version  = le32_to_cpu(desc->fw_version);
2342		psp->ta_dtm_ucode_size     = le32_to_cpu(desc->size_bytes);
2343		psp->ta_dtm_start_addr     = ucode_start_addr;
2344		break;
2345	default:
2346		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
2347		break;
2348	}
2349
2350	return 0;
2351}
2352
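/*
 * Request and validate amdgpu/<chip>_ta.bin (a v2.0 packed TA
 * container) and parse every TA binary descriptor it carries.
 */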
2353int psp_init_ta_microcode(struct psp_context *psp,
2354			  const char *chip_name)
2355{
2356	struct amdgpu_device *adev = psp->adev;
2357	char fw_name[30];
2358	const struct ta_firmware_header_v2_0 *ta_hdr;
2359	int err = 0;
2360	int ta_index = 0;
2361
2362	if (!chip_name) {
2363		dev_err(adev->dev, "invalid chip name for ta microcode\n");
2364		return -EINVAL;
2365	}
2366
2367	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
2368	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
2369	if (err)
2370		goto out;
2371
2372	err = amdgpu_ucode_validate(adev->psp.ta_fw);
2373	if (err)
2374		goto out;
2375
2376	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
2377
2378	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
2379		dev_err(adev->dev, "unsupported TA header version\n");
2380		err = -EINVAL;
2381		goto out;
2382	}
2383
2384	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
2385		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
2386		err = -EINVAL;
2387		goto out;
2388	}
2389
2390	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
2391		err = parse_ta_bin_descriptor(psp,
2392					      &ta_hdr->ta_fw_bin[ta_index],
2393					      ta_hdr);
2394		if (err)
2395			goto out;
2396	}
2397
2398	return 0;
2399out:
2400	dev_err(adev->dev, "failed to initialize ta microcode\n");
2401	release_firmware(adev->psp.ta_fw);
2402	adev->psp.ta_fw = NULL;
2403	return err;
2404}
2405
2406static int psp_set_clockgating_state(void *handle,
2407				     enum amd_clockgating_state state)
2408{
2409	return 0;
2410}
2411
2412static int psp_set_powergating_state(void *handle,
2413				     enum amd_powergating_state state)
2414{
2415	return 0;
2416}
2417
2418static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
2419					 struct device_attribute *attr,
2420					 char *buf)
2421{
2422	struct drm_device *ddev = dev_get_drvdata(dev);
2423	struct amdgpu_device *adev = ddev->dev_private;
2424	uint32_t fw_ver;
2425	int ret;
2426
2427	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2428		DRM_INFO("PSP block is not ready yet.\n");
2429		return -EBUSY;
2430	}
2431
2432	mutex_lock(&adev->psp.mutex);
2433	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
2434	mutex_unlock(&adev->psp.mutex);
2435
2436	if (ret) {
2437		DRM_ERROR("Failed to read USBC PD FW, err = %d\n", ret);
2438		return ret;
2439	}
2440
2441	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
2442}
2443
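/*
 * Sysfs write handler: the firmware file named by the user is fetched
 * through the firmware loader, copied into a DMA-coherent buffer
 * (flushed on x86 since PSP does not snoop the CPU cache) and handed to
 * PSP to flash the USB-C PD controller firmware.
 */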
2444static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
2445						       struct device_attribute *attr,
2446						       const char *buf,
2447						       size_t count)
2448{
2449	struct drm_device *ddev = dev_get_drvdata(dev);
2450	struct amdgpu_device *adev = ddev->dev_private;
2451	void *cpu_addr;
2452	dma_addr_t dma_addr;
2453	int ret;
2454	char fw_name[100];
2455	const struct firmware *usbc_pd_fw;
2456
2457	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2458		DRM_INFO("PSP block is not ready yet.\n");
2459		return -EBUSY;
2460	}
2461
2462	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
2463	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
2464	if (ret)
2465		goto fail;
2466
2467	/* We need contiguous physical mem to place the FW for psp to access */
2468	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
2469	if (!cpu_addr) {
2470		ret = -ENOMEM;
2471		goto rel_buf;
2472	}
2473
2474	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
2475
2476	/*
2477	 * x86 specific workaround.
2478	 * Without it the buffer is invisible in PSP.
2479	 *
2480	 * TODO Remove once PSP starts snooping CPU cache
2481	 */
2482#ifdef CONFIG_X86
2483	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
2484#endif
2485
2486	mutex_lock(&adev->psp.mutex);
2487	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
2488	mutex_unlock(&adev->psp.mutex);
2489
2490rel_buf:
2491	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
2492	release_firmware(usbc_pd_fw);
2493
2494fail:
2495	if (ret) {
2496		DRM_ERROR("Failed to load USBC PD FW, err = %d\n", ret);
2497		return ret;
2498	}
2499
2500	return count;
2501}
2502
2503static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
2504		   psp_usbc_pd_fw_sysfs_read,
2505		   psp_usbc_pd_fw_sysfs_write);
2506
2507
2508
2509const struct amd_ip_funcs psp_ip_funcs = {
2510	.name = "psp",
2511	.early_init = psp_early_init,
2512	.late_init = NULL,
2513	.sw_init = psp_sw_init,
2514	.sw_fini = psp_sw_fini,
2515	.hw_init = psp_hw_init,
2516	.hw_fini = psp_hw_fini,
2517	.suspend = psp_suspend,
2518	.resume = psp_resume,
2519	.is_idle = NULL,
2520	.check_soft_reset = NULL,
2521	.wait_for_idle = NULL,
2522	.soft_reset = NULL,
2523	.set_clockgating_state = psp_set_clockgating_state,
2524	.set_powergating_state = psp_set_powergating_state,
2525};
2526
2527static int psp_sysfs_init(struct amdgpu_device *adev)
2528{
2529	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
2530
2531	if (ret)
2532		DRM_ERROR("Failed to create USBC PD FW control file!");
2533
2534	return ret;
2535}
2536
2537static void psp_sysfs_fini(struct amdgpu_device *adev)
2538{
2539	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
2540}
2541
2542const struct amdgpu_ip_block_version psp_v3_1_ip_block =
2543{
2544	.type = AMD_IP_BLOCK_TYPE_PSP,
2545	.major = 3,
2546	.minor = 1,
2547	.rev = 0,
2548	.funcs = &psp_ip_funcs,
2549};
2550
2551const struct amdgpu_ip_block_version psp_v10_0_ip_block =
2552{
2553	.type = AMD_IP_BLOCK_TYPE_PSP,
2554	.major = 10,
2555	.minor = 0,
2556	.rev = 0,
2557	.funcs = &psp_ip_funcs,
2558};
2559
2560const struct amdgpu_ip_block_version psp_v11_0_ip_block =
2561{
2562	.type = AMD_IP_BLOCK_TYPE_PSP,
2563	.major = 11,
2564	.minor = 0,
2565	.rev = 0,
2566	.funcs = &psp_ip_funcs,
2567};
2568
2569const struct amdgpu_ip_block_version psp_v12_0_ip_block =
2570{
2571	.type = AMD_IP_BLOCK_TYPE_PSP,
2572	.major = 12,
2573	.minor = 0,
2574	.rev = 0,
2575	.funcs = &psp_ip_funcs,
2576};