v6.8
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/dmi.h>
  30#include <linux/pci.h>
  31#include <linux/debugfs.h>
  32#include <drm/drm_drv.h>
  33
  34#include "amdgpu.h"
  35#include "amdgpu_pm.h"
  36#include "amdgpu_vcn.h"
  37#include "soc15d.h"
  38
  39/* Firmware Names */
  40#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
  41#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
  42#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
  43#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
  44#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
  45#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
  46#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
  47#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
  48#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
  49#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
  50#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
  51#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
  52#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  53#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
  54#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
  55#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
  56#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
  57#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
  58#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
  59#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
  60#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
  61#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
  62
  63MODULE_FIRMWARE(FIRMWARE_RAVEN);
  64MODULE_FIRMWARE(FIRMWARE_PICASSO);
  65MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  66MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  67MODULE_FIRMWARE(FIRMWARE_RENOIR);
  68MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  69MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  70MODULE_FIRMWARE(FIRMWARE_NAVI10);
  71MODULE_FIRMWARE(FIRMWARE_NAVI14);
  72MODULE_FIRMWARE(FIRMWARE_NAVI12);
  73MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  74MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  75MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  76MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  77MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  78MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  79MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  80MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  81MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  82MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
  83MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  84MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
  85
  86static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  87
  88int amdgpu_vcn_early_init(struct amdgpu_device *adev)
  89{
  90	char ucode_prefix[30];
  91	char fw_name[40];
  92	int r;
  93
  94	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
  95	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
  96	r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
  97	if (r)
  98		amdgpu_ucode_release(&adev->vcn.fw);
  99
 100	return r;
 101}
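/*
 * amdgpu_ucode_ip_version_decode() yields a prefix such as "vcn_4_0_4", so
 * the request above resolves to names like "amdgpu/vcn_4_0_4.bin", matching
 * the FIRMWARE_* defines; on failure the handle is released so no stale
 * firmware pointer survives a probe error.
 */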
 102
 103int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 104{
 105	unsigned long bo_size;
 106	const struct common_firmware_header *hdr;
 107	unsigned char fw_check;
 108	unsigned int fw_shared_size, log_offset;
 109	int i, r;
 110
 111	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 112	mutex_init(&adev->vcn.vcn_pg_lock);
 113	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
 114	atomic_set(&adev->vcn.total_submission_cnt, 0);
 115	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
 116		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 117
 118	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 119	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 120		adev->vcn.indirect_sram = true;
 121
 122	/*
 123	 * Some Steam Deck's BIOS versions are incompatible with the
 124	 * indirect SRAM mode, leading to amdgpu being unable to get
 125	 * properly probed (and even potentially crashing the kernel).
 126	 * Hence, check for these versions here - notice this is
 127	 * restricted to Vangogh (Deck's APU).
 128	 */
 129	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
 130		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
 131
 132		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
 133		     !strncmp("F7A0114", bios_ver, 7))) {
 134			adev->vcn.indirect_sram = false;
 135			dev_info(adev->dev,
 136				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
 137		}
 138	}
 139
 140	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 141	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 142
  143	/* Bits 20-23 hold the encode major version and are non-zero in the new
  144	 * naming convention. In the old naming convention this field is part of
  145	 * the version minor and DRM_DISABLED_FLAG. Since the latest version minor
  146	 * is 0x5B and DRM_DISABLED_FLAG is zero there, this field has always been
  147	 * zero so far; these four bits tell which naming convention is present.
  148	 */
 149	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 150	if (fw_check) {
 151		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 152
 153		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 154		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 155		enc_major = fw_check;
 156		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 157		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 158		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 159			enc_major, enc_minor, dec_ver, vep, fw_rev);
 160	} else {
 161		unsigned int version_major, version_minor, family_id;
 162
 163		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 164		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 165		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 166		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 167			version_major, version_minor, family_id);
 168	}
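	/*
	 * Worked example (purely illustrative value, not from a real header):
	 * ucode_version = 0x0211005A gives fw_check = (v >> 20) & 0xf = 1, so
	 * the new convention applies and it decodes as ENC 1.16
	 * (enc_minor = 0x10), DEC 2, VEP 0, revision 0x5A.
	 */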
 169
 170	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 171	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 172		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 173
 174	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
 175		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 176		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 177	} else {
 178		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 179		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 180	}
 181
 182	bo_size += fw_shared_size;
 183
 184	if (amdgpu_vcnfw_log)
 185		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 186
 187	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 188		if (adev->vcn.harvest_config & (1 << i))
 189			continue;
 190
 191		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 192					    AMDGPU_GEM_DOMAIN_VRAM |
 193					    AMDGPU_GEM_DOMAIN_GTT,
 194					    &adev->vcn.inst[i].vcpu_bo,
 195					    &adev->vcn.inst[i].gpu_addr,
 196					    &adev->vcn.inst[i].cpu_addr);
 197		if (r) {
 198			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 199			return r;
 200		}
 201
 202		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 203				bo_size - fw_shared_size;
 204		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 205				bo_size - fw_shared_size;
 206
 207		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 208
 209		if (amdgpu_vcnfw_log) {
 210			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 211			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 212			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 213		}
 214
 215		if (adev->vcn.indirect_sram) {
 216			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 217					AMDGPU_GEM_DOMAIN_VRAM |
 218					AMDGPU_GEM_DOMAIN_GTT,
 219					&adev->vcn.inst[i].dpg_sram_bo,
 220					&adev->vcn.inst[i].dpg_sram_gpu_addr,
 221					&adev->vcn.inst[i].dpg_sram_cpu_addr);
 222			if (r) {
 223				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 224				return r;
 225			}
 226		}
 227	}
 228
 229	return 0;
 230}
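/*
 * Resulting per-instance VCPU BO layout (a sketch of the sizes computed in
 * amdgpu_vcn_sw_init() above; "fw image" is present only when firmware is
 * not loaded through PSP, "fw log" only when amdgpu_vcnfw_log is set):
 *
 *   vcpu_bo start: [fw image][stack + context]...[fw_shared][fw log] :end
 *
 * fw_shared.cpu_addr points at the fw_shared region, and the log buffer,
 * when enabled, occupies the final AMDGPU_VCNFW_LOG_SIZE bytes.
 */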
 231
 232int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 233{
 234	int i, j;
 235
 236	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 237		if (adev->vcn.harvest_config & (1 << j))
 238			continue;
 239
 240		amdgpu_bo_free_kernel(
 241			&adev->vcn.inst[j].dpg_sram_bo,
 242			&adev->vcn.inst[j].dpg_sram_gpu_addr,
 243			(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 244
 245		kvfree(adev->vcn.inst[j].saved_bo);
 246
 247		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 248					  &adev->vcn.inst[j].gpu_addr,
 249					  (void **)&adev->vcn.inst[j].cpu_addr);
 250
 251		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 252
 253		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 254			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 255	}
 256
 257	amdgpu_ucode_release(&adev->vcn.fw);
 258	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 259	mutex_destroy(&adev->vcn.vcn_pg_lock);
 260
 261	return 0;
 262}
 263
 264/* from vcn4 and above, only unified queue is used */
 265static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
 266{
 267	struct amdgpu_device *adev = ring->adev;
 268	bool ret = false;
 269
 270	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
 271		ret = true;
 272
 273	return ret;
 274}
 275
 276bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 277{
 278	bool ret = false;
 279	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 280
 281	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
 282		ret = true;
 283	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
 284		ret = true;
 285	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
 286		ret = true;
 287
 288	return ret;
 289}
 290
 291int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 292{
 293	unsigned int size;
 294	void *ptr;
 295	int i, idx;
 296
 297	bool in_ras_intr = amdgpu_ras_intr_triggered();
 298
 299	cancel_delayed_work_sync(&adev->vcn.idle_work);
 300
 301	/* err_event_athub will corrupt VCPU buffer, so we need to
 302	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
 303	if (in_ras_intr)
 304		return 0;
 305
 306	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 307		if (adev->vcn.harvest_config & (1 << i))
 308			continue;
 309		if (adev->vcn.inst[i].vcpu_bo == NULL)
 310			return 0;
 311
 312		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 313		ptr = adev->vcn.inst[i].cpu_addr;
 314
 315		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 316		if (!adev->vcn.inst[i].saved_bo)
 317			return -ENOMEM;
 318
 319		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 320			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 321			drm_dev_exit(idx);
 322		}
 323	}
 324	return 0;
 325}
 326
 327int amdgpu_vcn_resume(struct amdgpu_device *adev)
 328{
 329	unsigned int size;
 330	void *ptr;
 331	int i, idx;
 332
 333	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 334		if (adev->vcn.harvest_config & (1 << i))
 335			continue;
 336		if (adev->vcn.inst[i].vcpu_bo == NULL)
 337			return -EINVAL;
 338
 339		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 340		ptr = adev->vcn.inst[i].cpu_addr;
 341
 342		if (adev->vcn.inst[i].saved_bo != NULL) {
 343			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 344				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 345				drm_dev_exit(idx);
 346			}
 347			kvfree(adev->vcn.inst[i].saved_bo);
 348			adev->vcn.inst[i].saved_bo = NULL;
 349		} else {
 350			const struct common_firmware_header *hdr;
 351			unsigned int offset;
 352
 353			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 354			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 355				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 356				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 357					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
 358						    le32_to_cpu(hdr->ucode_size_bytes));
 359					drm_dev_exit(idx);
 360				}
 361				size -= le32_to_cpu(hdr->ucode_size_bytes);
 362				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 363			}
 364			memset_io(ptr, 0, size);
 365		}
 366	}
 367	return 0;
 368}
 369
 370static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 371{
 372	struct amdgpu_device *adev =
 373		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 374	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 375	unsigned int i, j;
 376	int r = 0;
 377
 378	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 379		if (adev->vcn.harvest_config & (1 << j))
 380			continue;
 381
 382		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 383			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 384
 385		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
 386			struct dpg_pause_state new_state;
 387
 388			if (fence[j] ||
 389				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 390				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 391			else
 392				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 393
 394			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 395		}
 396
 397		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 398		fences += fence[j];
 399	}
 400
 401	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 402		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 403		       AMD_PG_STATE_GATE);
 404		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 405				false);
 406		if (r)
 407			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 408	} else {
 409		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 410	}
 411}
 412
 413void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 414{
 415	struct amdgpu_device *adev = ring->adev;
 416	int r = 0;
 417
 418	atomic_inc(&adev->vcn.total_submission_cnt);
 419
 420	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 421		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 422				true);
 423		if (r)
 424			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 425	}
 426
 427	mutex_lock(&adev->vcn.vcn_pg_lock);
 428	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 429	       AMD_PG_STATE_UNGATE);
 430
 431	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
 432		struct dpg_pause_state new_state;
 433
 434		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 435			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 436			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 437		} else {
 438			unsigned int fences = 0;
 439			unsigned int i;
 440
 441			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 442				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 443
 444			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 445				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 446			else
 447				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 448		}
 449
 450		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 451	}
 452	mutex_unlock(&adev->vcn.vcn_pg_lock);
 453}
 454
 455void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 456{
 457	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 458		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 459		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 460
 461	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 462
 463	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 464}
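/*
 * begin_use/end_use bracket every job on a VCN ring: begin_use raises the
 * submission counters, switches to the video power profile, ungates VCN
 * and, in DPG mode, asks the firmware to pause; end_use drops the counters
 * and re-arms the idle worker, which gates VCN again VCN_IDLE_TIMEOUT
 * later once no fences or submissions remain.
 */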
 465
 466int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 467{
 468	struct amdgpu_device *adev = ring->adev;
 469	uint32_t tmp = 0;
 470	unsigned int i;
 471	int r;
 472
 473	/* VCN in SRIOV does not support direct register read/write */
 474	if (amdgpu_sriov_vf(adev))
 475		return 0;
 476
 477	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 478	r = amdgpu_ring_alloc(ring, 3);
 479	if (r)
 480		return r;
 481	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 482	amdgpu_ring_write(ring, 0xDEADBEEF);
 483	amdgpu_ring_commit(ring);
 484	for (i = 0; i < adev->usec_timeout; i++) {
 485		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 486		if (tmp == 0xDEADBEEF)
 487			break;
 488		udelay(1);
 489	}
 490
 491	if (i >= adev->usec_timeout)
 492		r = -ETIMEDOUT;
 493
 494	return r;
 495}
 496
 497int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 498{
 499	struct amdgpu_device *adev = ring->adev;
 500	uint32_t rptr;
 501	unsigned int i;
 502	int r;
 503
 504	if (amdgpu_sriov_vf(adev))
 505		return 0;
 506
 507	r = amdgpu_ring_alloc(ring, 16);
 508	if (r)
 509		return r;
 510
 511	rptr = amdgpu_ring_get_rptr(ring);
 512
 513	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 514	amdgpu_ring_commit(ring);
 515
 516	for (i = 0; i < adev->usec_timeout; i++) {
 517		if (amdgpu_ring_get_rptr(ring) != rptr)
 518			break;
 519		udelay(1);
 520	}
 521
 522	if (i >= adev->usec_timeout)
 523		r = -ETIMEDOUT;
 524
 525	return r;
 526}
 527
 528static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 529				   struct amdgpu_ib *ib_msg,
 530				   struct dma_fence **fence)
 531{
 532	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 533	struct amdgpu_device *adev = ring->adev;
 534	struct dma_fence *f = NULL;
 535	struct amdgpu_job *job;
 536	struct amdgpu_ib *ib;
 537	int i, r;
 538
 539	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 540				     64, AMDGPU_IB_POOL_DIRECT,
 541				     &job);
 542	if (r)
 543		goto err;
 544
 545	ib = &job->ibs[0];
 546	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 547	ib->ptr[1] = addr;
 548	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 549	ib->ptr[3] = addr >> 32;
 550	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 551	ib->ptr[5] = 0;
 552	for (i = 6; i < 16; i += 2) {
 553		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 554		ib->ptr[i+1] = 0;
 555	}
 556	ib->length_dw = 16;
 557
 558	r = amdgpu_job_submit_direct(job, ring, &f);
 559	if (r)
 560		goto err_free;
 561
 562	amdgpu_ib_free(adev, ib_msg, f);
 563
 564	if (fence)
 565		*fence = dma_fence_get(f);
 566	dma_fence_put(f);
 567
 568	return 0;
 569
 570err_free:
 571	amdgpu_job_free(job);
 572err:
 573	amdgpu_ib_free(adev, ib_msg, f);
 574	return r;
 575}
 576
 577static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 578		struct amdgpu_ib *ib)
 579{
 580	struct amdgpu_device *adev = ring->adev;
 581	uint32_t *msg;
 582	int r, i;
 583
 584	memset(ib, 0, sizeof(*ib));
 585	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 586			AMDGPU_IB_POOL_DIRECT,
 587			ib);
 588	if (r)
 589		return r;
 590
 591	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 592	msg[0] = cpu_to_le32(0x00000028);
 593	msg[1] = cpu_to_le32(0x00000038);
 594	msg[2] = cpu_to_le32(0x00000001);
 595	msg[3] = cpu_to_le32(0x00000000);
 596	msg[4] = cpu_to_le32(handle);
 597	msg[5] = cpu_to_le32(0x00000000);
 598	msg[6] = cpu_to_le32(0x00000001);
 599	msg[7] = cpu_to_le32(0x00000028);
 600	msg[8] = cpu_to_le32(0x00000010);
 601	msg[9] = cpu_to_le32(0x00000000);
 602	msg[10] = cpu_to_le32(0x00000007);
 603	msg[11] = cpu_to_le32(0x00000000);
 604	msg[12] = cpu_to_le32(0x00000780);
 605	msg[13] = cpu_to_le32(0x00000440);
 606	for (i = 14; i < 1024; ++i)
 607		msg[i] = cpu_to_le32(0x0);
 608
 609	return 0;
 610}
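/*
 * In the create message above, msg[4] carries the session handle and
 * msg[12]/msg[13] give a 0x780 x 0x440 (1920x1088) coded picture size; the
 * remaining constants are firmware-defined message framing. Note msg[3] is
 * 0 here versus 2 in the destroy message below, distinguishing the two.
 */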
 611
 612static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 613					  struct amdgpu_ib *ib)
 614{
 615	struct amdgpu_device *adev = ring->adev;
 616	uint32_t *msg;
 617	int r, i;
 618
 619	memset(ib, 0, sizeof(*ib));
 620	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 621			AMDGPU_IB_POOL_DIRECT,
 622			ib);
 623	if (r)
 624		return r;
 625
 626	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 627	msg[0] = cpu_to_le32(0x00000028);
 628	msg[1] = cpu_to_le32(0x00000018);
 629	msg[2] = cpu_to_le32(0x00000000);
 630	msg[3] = cpu_to_le32(0x00000002);
 631	msg[4] = cpu_to_le32(handle);
 632	msg[5] = cpu_to_le32(0x00000000);
 633	for (i = 6; i < 1024; ++i)
 634		msg[i] = cpu_to_le32(0x0);
 635
 636	return 0;
 637}
 638
 639int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 640{
 641	struct dma_fence *fence = NULL;
 642	struct amdgpu_ib ib;
 643	long r;
 644
 645	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 646	if (r)
 647		goto error;
 648
 649	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 650	if (r)
 651		goto error;
 652	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 653	if (r)
 654		goto error;
 655
 656	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 657	if (r)
 658		goto error;
 659
 660	r = dma_fence_wait_timeout(fence, false, timeout);
 661	if (r == 0)
 662		r = -ETIMEDOUT;
 663	else if (r > 0)
 664		r = 0;
 665
 666	dma_fence_put(fence);
 667error:
 668	return r;
 669}
 670
 671static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 672						uint32_t ib_pack_in_dw, bool enc)
 673{
 674	uint32_t *ib_checksum;
 675
 676	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 677	ib->ptr[ib->length_dw++] = 0x30000002;
 678	ib_checksum = &ib->ptr[ib->length_dw++];
 679	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 680
 681	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 682	ib->ptr[ib->length_dw++] = 0x30000001;
 683	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 684	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 685
 686	return ib_checksum;
 687}
 688
 689static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 690						uint32_t ib_pack_in_dw)
 691{
 692	uint32_t i;
 693	uint32_t checksum = 0;
 694
 695	for (i = 0; i < ib_pack_in_dw; i++)
 696		checksum += *(*ib_checksum + 2 + i);
 697
 698	**ib_checksum = checksum;
 699}
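/*
 * Sketch of the eight-dword unified-queue prefix built by the two helpers
 * above:
 *
 *   dw0 0x00000010  dw1 0x30000002  dw2 <checksum>  dw3 ib_pack_in_dw
 *   dw4 0x00000010  dw5 0x30000001  dw6 0x2 enc / 0x3 dec  dw7 bytes
 *
 * The checksum slot (dw2) is filled in afterwards by summing the
 * ib_pack_in_dw dwords beginning two dwords past the slot itself.
 */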
 700
 701static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 702				      struct amdgpu_ib *ib_msg,
 703				      struct dma_fence **fence)
 704{
 705	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 706	unsigned int ib_size_dw = 64;
 707	struct amdgpu_device *adev = ring->adev;
 708	struct dma_fence *f = NULL;
 709	struct amdgpu_job *job;
 710	struct amdgpu_ib *ib;
 711	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 712	bool sq = amdgpu_vcn_using_unified_queue(ring);
 713	uint32_t *ib_checksum;
 714	uint32_t ib_pack_in_dw;
 715	int i, r;
 716
 717	if (sq)
 718		ib_size_dw += 8;
 719
 720	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 721				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 722				     &job);
 723	if (r)
 724		goto err;
 725
 726	ib = &job->ibs[0];
 727	ib->length_dw = 0;
 728
 729	/* single queue headers */
 730	if (sq) {
 731		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 732						+ 4 + 2; /* engine info + decoding ib in dw */
 733		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 734	}
 735
 736	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 737	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 738	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 739	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 740	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 741
 742	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 743	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 744	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 745
 746	for (i = ib->length_dw; i < ib_size_dw; ++i)
 747		ib->ptr[i] = 0x0;
 748
 749	if (sq)
 750		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 751
 752	r = amdgpu_job_submit_direct(job, ring, &f);
 753	if (r)
 754		goto err_free;
 755
 756	amdgpu_ib_free(adev, ib_msg, f);
 757
 758	if (fence)
 759		*fence = dma_fence_get(f);
 760	dma_fence_put(f);
 761
 762	return 0;
 763
 764err_free:
 765	amdgpu_job_free(job);
 766err:
 767	amdgpu_ib_free(adev, ib_msg, f);
 768	return r;
 769}
 770
 771int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 772{
 773	struct dma_fence *fence = NULL;
 774	struct amdgpu_ib ib;
 775	long r;
 776
 777	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 778	if (r)
 779		goto error;
 780
 781	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 782	if (r)
 783		goto error;
 784	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 785	if (r)
 786		goto error;
 787
 788	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 789	if (r)
 790		goto error;
 791
 792	r = dma_fence_wait_timeout(fence, false, timeout);
 793	if (r == 0)
 794		r = -ETIMEDOUT;
 795	else if (r > 0)
 796		r = 0;
 797
 798	dma_fence_put(fence);
 799error:
 800	return r;
 801}
 802
 803int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 804{
 805	struct amdgpu_device *adev = ring->adev;
 806	uint32_t rptr;
 807	unsigned int i;
 808	int r;
 809
 810	if (amdgpu_sriov_vf(adev))
 811		return 0;
 812
 813	r = amdgpu_ring_alloc(ring, 16);
 814	if (r)
 815		return r;
 816
 817	rptr = amdgpu_ring_get_rptr(ring);
 818
 819	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 820	amdgpu_ring_commit(ring);
 821
 822	for (i = 0; i < adev->usec_timeout; i++) {
 823		if (amdgpu_ring_get_rptr(ring) != rptr)
 824			break;
 825		udelay(1);
 826	}
 827
 828	if (i >= adev->usec_timeout)
 829		r = -ETIMEDOUT;
 830
 831	return r;
 832}
 833
 834static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 835					 struct amdgpu_ib *ib_msg,
 836					 struct dma_fence **fence)
 837{
 838	unsigned int ib_size_dw = 16;
 839	struct amdgpu_job *job;
 840	struct amdgpu_ib *ib;
 841	struct dma_fence *f = NULL;
 842	uint32_t *ib_checksum = NULL;
 843	uint64_t addr;
 844	bool sq = amdgpu_vcn_using_unified_queue(ring);
 845	int i, r;
 846
 847	if (sq)
 848		ib_size_dw += 8;
 849
 850	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 851				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 852				     &job);
 853	if (r)
 854		return r;
 855
 856	ib = &job->ibs[0];
 857	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 858
 859	ib->length_dw = 0;
 860
 861	if (sq)
 862		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 863
 864	ib->ptr[ib->length_dw++] = 0x00000018;
 865	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 866	ib->ptr[ib->length_dw++] = handle;
 867	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 868	ib->ptr[ib->length_dw++] = addr;
 869	ib->ptr[ib->length_dw++] = 0x0000000b;
 870
 871	ib->ptr[ib->length_dw++] = 0x00000014;
 872	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 873	ib->ptr[ib->length_dw++] = 0x0000001c;
 874	ib->ptr[ib->length_dw++] = 0x00000000;
 875	ib->ptr[ib->length_dw++] = 0x00000000;
 876
 877	ib->ptr[ib->length_dw++] = 0x00000008;
 878	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 879
 880	for (i = ib->length_dw; i < ib_size_dw; ++i)
 881		ib->ptr[i] = 0x0;
 882
 883	if (sq)
 884		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 885
 886	r = amdgpu_job_submit_direct(job, ring, &f);
 887	if (r)
 888		goto err;
 889
 890	if (fence)
 891		*fence = dma_fence_get(f);
 892	dma_fence_put(f);
 893
 894	return 0;
 895
 896err:
 897	amdgpu_job_free(job);
 898	return r;
 899}
 900
 901static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 902					  struct amdgpu_ib *ib_msg,
 903					  struct dma_fence **fence)
 904{
 905	unsigned int ib_size_dw = 16;
 906	struct amdgpu_job *job;
 907	struct amdgpu_ib *ib;
 908	struct dma_fence *f = NULL;
 909	uint32_t *ib_checksum = NULL;
 910	uint64_t addr;
 911	bool sq = amdgpu_vcn_using_unified_queue(ring);
 912	int i, r;
 913
 914	if (sq)
 915		ib_size_dw += 8;
 916
 917	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 918				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 919				     &job);
 920	if (r)
 921		return r;
 922
 923	ib = &job->ibs[0];
 924	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 925
 926	ib->length_dw = 0;
 927
 928	if (sq)
 929		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 930
 931	ib->ptr[ib->length_dw++] = 0x00000018;
 932	ib->ptr[ib->length_dw++] = 0x00000001;
 933	ib->ptr[ib->length_dw++] = handle;
 934	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 935	ib->ptr[ib->length_dw++] = addr;
 936	ib->ptr[ib->length_dw++] = 0x0000000b;
 937
 938	ib->ptr[ib->length_dw++] = 0x00000014;
 939	ib->ptr[ib->length_dw++] = 0x00000002;
 940	ib->ptr[ib->length_dw++] = 0x0000001c;
 941	ib->ptr[ib->length_dw++] = 0x00000000;
 942	ib->ptr[ib->length_dw++] = 0x00000000;
 943
 944	ib->ptr[ib->length_dw++] = 0x00000008;
 945	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 946
 947	for (i = ib->length_dw; i < ib_size_dw; ++i)
 948		ib->ptr[i] = 0x0;
 949
 950	if (sq)
 951		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 952
 953	r = amdgpu_job_submit_direct(job, ring, &f);
 954	if (r)
 955		goto err;
 956
 957	if (fence)
 958		*fence = dma_fence_get(f);
 959	dma_fence_put(f);
 960
 961	return 0;
 962
 963err:
 964	amdgpu_job_free(job);
 965	return r;
 966}
 967
 968int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 969{
 970	struct amdgpu_device *adev = ring->adev;
 971	struct dma_fence *fence = NULL;
 972	struct amdgpu_ib ib;
 973	long r;
 974
 975	memset(&ib, 0, sizeof(ib));
 976	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
 977			AMDGPU_IB_POOL_DIRECT,
 978			&ib);
 979	if (r)
 980		return r;
 981
 982	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
 983	if (r)
 984		goto error;
 985
 986	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
 987	if (r)
 988		goto error;
 989
 990	r = dma_fence_wait_timeout(fence, false, timeout);
 991	if (r == 0)
 992		r = -ETIMEDOUT;
 993	else if (r > 0)
 994		r = 0;
 995
 996error:
 997	amdgpu_ib_free(adev, &ib, fence);
 998	dma_fence_put(fence);
 999
1000	return r;
1001}
1002
1003int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1004{
1005	struct amdgpu_device *adev = ring->adev;
1006	long r;
1007
1008	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
1009		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1010		if (r)
1011			goto error;
1012	}
1013
1014	r =  amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1015
1016error:
1017	return r;
1018}
1019
1020enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1021{
1022	switch (ring) {
1023	case 0:
1024		return AMDGPU_RING_PRIO_0;
1025	case 1:
1026		return AMDGPU_RING_PRIO_1;
1027	case 2:
1028		return AMDGPU_RING_PRIO_2;
1029	default:
1030		return AMDGPU_RING_PRIO_0;
1031	}
1032}
1033
1034void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1035{
1036	int i;
1037	unsigned int idx;
1038
1039	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1040		const struct common_firmware_header *hdr;
1041
1042		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1043
1044		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1045			if (adev->vcn.harvest_config & (1 << i))
1046				continue;
1047			/* currently only support 2 FW instances */
1048			if (i >= 2) {
 1049				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1050				break;
1051			}
1052			idx = AMDGPU_UCODE_ID_VCN + i;
1053			adev->firmware.ucode[idx].ucode_id = idx;
1054			adev->firmware.ucode[idx].fw = adev->vcn.fw;
1055			adev->firmware.fw_size +=
1056				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1057
1058			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1059			    IP_VERSION(4, 0, 3))
1060				break;
1061		}
1062		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1063	}
1064}
1065
1066/*
1067 * debugfs for mapping vcn firmware log buffer.
1068 */
1069#if defined(CONFIG_DEBUG_FS)
1070static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1071					     size_t size, loff_t *pos)
1072{
1073	struct amdgpu_vcn_inst *vcn;
1074	void *log_buf;
1075	volatile struct amdgpu_vcn_fwlog *plog;
1076	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1077	unsigned int read_num[2] = {0};
1078
1079	vcn = file_inode(f)->i_private;
1080	if (!vcn)
1081		return -ENODEV;
1082
1083	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1084		return -EFAULT;
1085
1086	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1087
1088	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1089	read_pos = plog->rptr;
1090	write_pos = plog->wptr;
1091
1092	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1093		return -EFAULT;
1094
1095	if (!size || (read_pos == write_pos))
1096		return 0;
1097
1098	if (write_pos > read_pos) {
1099		available = write_pos - read_pos;
1100		read_num[0] = min_t(size_t, size, available);
1101	} else {
1102		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1103		available = read_num[0] + write_pos - plog->header_size;
1104		if (size > available)
1105			read_num[1] = write_pos - plog->header_size;
1106		else if (size > read_num[0])
1107			read_num[1] = size - read_num[0];
1108		else
1109			read_num[0] = size;
1110	}
1111
1112	for (i = 0; i < 2; i++) {
1113		if (read_num[i]) {
1114			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1115				read_pos = plog->header_size;
1116			if (read_num[i] == copy_to_user((buf + read_bytes),
1117							(log_buf + read_pos), read_num[i]))
1118				return -EFAULT;
1119
1120			read_bytes += read_num[i];
1121			read_pos += read_num[i];
1122		}
1123	}
1124
1125	plog->rptr = read_pos;
1126	*pos += read_bytes;
1127	return read_bytes;
1128}
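/*
 * Wrap-around example for the read above (illustrative numbers): with a
 * 4096-byte log, header_size = 32, rptr = 4000 and wptr = 100, chunk
 * read_num[0] covers bytes 4000..4095, read_pos then hits the buffer end
 * and is rewound to header_size, and chunk read_num[1] covers 32..99.
 */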
1129
1130static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1131	.owner = THIS_MODULE,
1132	.read = amdgpu_debugfs_vcn_fwlog_read,
1133	.llseek = default_llseek
1134};
1135#endif
1136
1137void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1138				   struct amdgpu_vcn_inst *vcn)
1139{
1140#if defined(CONFIG_DEBUG_FS)
1141	struct drm_minor *minor = adev_to_drm(adev)->primary;
1142	struct dentry *root = minor->debugfs_root;
1143	char name[32];
1144
1145	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1146	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1147				 &amdgpu_debugfs_vcnfwlog_fops,
1148				 AMDGPU_VCNFW_LOG_SIZE);
1149#endif
1150}
1151
1152void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1153{
1154#if defined(CONFIG_DEBUG_FS)
1155	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1156	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1157	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1158	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1159	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1160							 + vcn->fw_shared.log_offset;
1161	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1162	fw_log->is_enabled = 1;
1163	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1164	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1165	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1166
1167	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1168	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1169	log_buf->rptr = log_buf->header_size;
1170	log_buf->wptr = log_buf->header_size;
1171	log_buf->wrapped = 0;
1172#endif
1173}
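/*
 * After this init the tail of the shared buffer looks like: the logging
 * descriptor at fw_shared.cpu_addr + log_offset tells the firmware where
 * the log lives (addr_lo/hi, size, is_enabled), while the log itself sits
 * at fw_shared.cpu_addr + mem_size, an amdgpu_vcn_fwlog header followed by
 * the ring buffer, with rptr/wptr starting just past the header.
 */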
1174
1175int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1176				struct amdgpu_irq_src *source,
1177				struct amdgpu_iv_entry *entry)
1178{
1179	struct ras_common_if *ras_if = adev->vcn.ras_if;
1180	struct ras_dispatch_if ih_data = {
1181		.entry = entry,
1182	};
1183
1184	if (!ras_if)
1185		return 0;
1186
1187	if (!amdgpu_sriov_vf(adev)) {
1188		ih_data.head = *ras_if;
1189		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1190	} else {
1191		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1192			adev->virt.ops->ras_poison_handler(adev);
1193		else
1194			dev_warn(adev->dev,
1195				"No ras_poison_handler interface in SRIOV for VCN!\n");
1196	}
1197
1198	return 0;
1199}
1200
1201int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1202{
1203	int r, i;
1204
1205	r = amdgpu_ras_block_late_init(adev, ras_block);
1206	if (r)
1207		return r;
1208
1209	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1210		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1211			if (adev->vcn.harvest_config & (1 << i) ||
1212			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1213				continue;
1214
1215			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1216			if (r)
1217				goto late_fini;
1218		}
1219	}
1220	return 0;
1221
1222late_fini:
1223	amdgpu_ras_block_late_fini(adev, ras_block);
1224	return r;
1225}
1226
1227int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1228{
1229	int err;
1230	struct amdgpu_vcn_ras *ras;
1231
1232	if (!adev->vcn.ras)
1233		return 0;
1234
1235	ras = adev->vcn.ras;
1236	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1237	if (err) {
1238		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1239		return err;
1240	}
1241
1242	strcpy(ras->ras_block.ras_comm.name, "vcn");
1243	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1244	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1245	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1246
1247	if (!ras->ras_block.ras_late_init)
1248		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1249
1250	return 0;
1251}
1252
1253int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1254			       enum AMDGPU_UCODE_ID ucode_id)
1255{
1256	struct amdgpu_firmware_info ucode = {
1257		.ucode_id = (ucode_id ? ucode_id :
1258			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1259					AMDGPU_UCODE_ID_VCN0_RAM)),
1260		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1261		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1262			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1263	};
1264
1265	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1266}
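/*
 * ucode_size above is the number of bytes queued so far into the DPG SRAM
 * BO (allocated as 64 * 2 * 4 bytes in amdgpu_vcn_sw_init(), i.e. room for
 * 64 dword register/value pairs); dpg_sram_curr_addr is assumed to have
 * been advanced by the indirect-SRAM write helpers as pairs were queued.
 */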
v6.13.7
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/dmi.h>
  30#include <linux/pci.h>
  31#include <linux/debugfs.h>
  32#include <drm/drm_drv.h>
  33
  34#include "amdgpu.h"
  35#include "amdgpu_pm.h"
  36#include "amdgpu_vcn.h"
  37#include "soc15d.h"
  38
  39/* Firmware Names */
  40#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
  41#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
  42#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
  43#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
  44#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
  45#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
  46#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
  47#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
  48#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
  49#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
  50#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
  51#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
  52#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  53#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
  54#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
  55#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
  56#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
  57#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
  58#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
  59#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
  60#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
  61#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
  62#define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
  63#define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
  64#define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
  65
  66MODULE_FIRMWARE(FIRMWARE_RAVEN);
  67MODULE_FIRMWARE(FIRMWARE_PICASSO);
  68MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  69MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  70MODULE_FIRMWARE(FIRMWARE_RENOIR);
  71MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  72MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  73MODULE_FIRMWARE(FIRMWARE_NAVI10);
  74MODULE_FIRMWARE(FIRMWARE_NAVI14);
  75MODULE_FIRMWARE(FIRMWARE_NAVI12);
  76MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  77MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  78MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  79MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  80MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  81MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  82MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  83MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  84MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  85MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
  86MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  87MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
  88MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
  89MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
  90MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
  91
  92static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  93
  94int amdgpu_vcn_early_init(struct amdgpu_device *adev)
  95{
  96	char ucode_prefix[25];
  97	int r, i;
  98
  99	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
 100	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 101		if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) ==  IP_VERSION(4, 0, 6))
 102			r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
 103		else
 104			r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
 105		if (r) {
 106			amdgpu_ucode_release(&adev->vcn.fw[i]);
 107			return r;
 108		}
 109	}
 110	return r;
 111}
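/*
 * Firmware is requested per VCN instance here; on VCN 4.0.6 the second
 * instance uses its own image ("%s_%d.bin", matching FIRMWARE_VCN4_0_6_1),
 * while every other case reuses the common per-IP image.
 */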
 112
 113int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 114{
 115	unsigned long bo_size;
 116	const struct common_firmware_header *hdr;
 117	unsigned char fw_check;
 118	unsigned int fw_shared_size, log_offset;
 119	int i, r;
 120
 121	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 122	mutex_init(&adev->vcn.vcn_pg_lock);
 123	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
 124	atomic_set(&adev->vcn.total_submission_cnt, 0);
 125	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
 126		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 127
 128	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 129	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 130		adev->vcn.indirect_sram = true;
 131
 132	/*
 133	 * Some Steam Deck's BIOS versions are incompatible with the
 134	 * indirect SRAM mode, leading to amdgpu being unable to get
 135	 * properly probed (and even potentially crashing the kernel).
 136	 * Hence, check for these versions here - notice this is
 137	 * restricted to Vangogh (Deck's APU).
 138	 */
 139	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
 140		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
 141
 142		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
 143		     !strncmp("F7A0114", bios_ver, 7))) {
 144			adev->vcn.indirect_sram = false;
 145			dev_info(adev->dev,
 146				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
 147		}
 148	}
 149
 150	/* from vcn4 and above, only unified queue is used */
 151	adev->vcn.using_unified_queue =
 152		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
 153
 154	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 155	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 156
  157	/* Bits 20-23 hold the encode major version and are non-zero in the new
  158	 * naming convention. In the old naming convention this field is part of
  159	 * the version minor and DRM_DISABLED_FLAG. Since the latest version minor
  160	 * is 0x5B and DRM_DISABLED_FLAG is zero there, this field has always been
  161	 * zero so far; these four bits tell which naming convention is present.
  162	 */
 163	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 164	if (fw_check) {
 165		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 166
 167		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 168		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 169		enc_major = fw_check;
 170		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 171		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 172		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 173			enc_major, enc_minor, dec_ver, vep, fw_rev);
 174	} else {
 175		unsigned int version_major, version_minor, family_id;
 176
 177		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 178		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 179		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 180		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 181			version_major, version_minor, family_id);
 182	}
 183
 184	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 185	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 186		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 187
 188	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
 189		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
 190		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
 191	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
 192		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 193		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 194	} else {
 195		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 196		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 197	}
 198
 199	bo_size += fw_shared_size;
 200
 201	if (amdgpu_vcnfw_log)
 202		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 203
 204	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 205		if (adev->vcn.harvest_config & (1 << i))
 206			continue;
 207
 208		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 209					    AMDGPU_GEM_DOMAIN_VRAM |
 210					    AMDGPU_GEM_DOMAIN_GTT,
 211					    &adev->vcn.inst[i].vcpu_bo,
 212					    &adev->vcn.inst[i].gpu_addr,
 213					    &adev->vcn.inst[i].cpu_addr);
 214		if (r) {
 215			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 216			return r;
 217		}
 218
 219		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 220				bo_size - fw_shared_size;
 221		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 222				bo_size - fw_shared_size;
 223
 224		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 225
 226		if (amdgpu_vcnfw_log) {
 227			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 228			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 229			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 230		}
 231
 232		if (adev->vcn.indirect_sram) {
 233			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 234					AMDGPU_GEM_DOMAIN_VRAM |
 235					AMDGPU_GEM_DOMAIN_GTT,
 236					&adev->vcn.inst[i].dpg_sram_bo,
 237					&adev->vcn.inst[i].dpg_sram_gpu_addr,
 238					&adev->vcn.inst[i].dpg_sram_cpu_addr);
 239			if (r) {
 240				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 241				return r;
 242			}
 243		}
 244	}
 245
 246	return 0;
 247}
 248
 249int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 250{
 251	int i, j;
 252
 253	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 254		if (adev->vcn.harvest_config & (1 << j))
 255			continue;
 256
 257		amdgpu_bo_free_kernel(
 258			&adev->vcn.inst[j].dpg_sram_bo,
 259			&adev->vcn.inst[j].dpg_sram_gpu_addr,
 260			(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 261
 262		kvfree(adev->vcn.inst[j].saved_bo);
 263
 264		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 265					  &adev->vcn.inst[j].gpu_addr,
 266					  (void **)&adev->vcn.inst[j].cpu_addr);
 267
 268		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 269
 270		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 271			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 272
 273		amdgpu_ucode_release(&adev->vcn.fw[j]);
 274	}
 275
 276	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 277	mutex_destroy(&adev->vcn.vcn_pg_lock);
 278
 279	return 0;
 280}
 281
 282bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 283{
 284	bool ret = false;
 285	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 286
 287	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
 288		ret = true;
 289	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
 290		ret = true;
 291	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
 292		ret = true;
 293
 294	return ret;
 295}
 296
 297int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
 298{
 299	unsigned int size;
 300	void *ptr;
 301	int i, idx;
 302
 303	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 304		if (adev->vcn.harvest_config & (1 << i))
 305			continue;
 306		if (adev->vcn.inst[i].vcpu_bo == NULL)
 307			return 0;
 308
 309		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 310		ptr = adev->vcn.inst[i].cpu_addr;
 311
 312		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 313		if (!adev->vcn.inst[i].saved_bo)
 314			return -ENOMEM;
 315
 316		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 317			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 318			drm_dev_exit(idx);
 319		}
 320	}
 321
 322	return 0;
 323}
 324
 325int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 326{
 327	bool in_ras_intr = amdgpu_ras_intr_triggered();
 328
 329	cancel_delayed_work_sync(&adev->vcn.idle_work);
 330
 331	/* err_event_athub will corrupt VCPU buffer, so we need to
 332	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
 333	if (in_ras_intr)
 334		return 0;
 335
 336	return amdgpu_vcn_save_vcpu_bo(adev);
 337}
 338
 339int amdgpu_vcn_resume(struct amdgpu_device *adev)
 340{
 341	unsigned int size;
 342	void *ptr;
 343	int i, idx;
 344
 345	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 346		if (adev->vcn.harvest_config & (1 << i))
 347			continue;
 348		if (adev->vcn.inst[i].vcpu_bo == NULL)
 349			return -EINVAL;
 350
 351		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 352		ptr = adev->vcn.inst[i].cpu_addr;
 353
 354		if (adev->vcn.inst[i].saved_bo != NULL) {
 355			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 356				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 357				drm_dev_exit(idx);
 358			}
 359			kvfree(adev->vcn.inst[i].saved_bo);
 360			adev->vcn.inst[i].saved_bo = NULL;
 361		} else {
 362			const struct common_firmware_header *hdr;
 363			unsigned int offset;
 364
 365			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 366			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 367				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 368				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 369					memcpy_toio(adev->vcn.inst[i].cpu_addr,
 370						    adev->vcn.fw[i]->data + offset,
 371						    le32_to_cpu(hdr->ucode_size_bytes));
 372					drm_dev_exit(idx);
 373				}
 374				size -= le32_to_cpu(hdr->ucode_size_bytes);
 375				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 376			}
 377			memset_io(ptr, 0, size);
 378		}
 379	}
 380	return 0;
 381}
 382
 383static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 384{
 385	struct amdgpu_device *adev =
 386		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 387	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 388	unsigned int i, j;
 389	int r = 0;
 390
 391	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 392		if (adev->vcn.harvest_config & (1 << j))
 393			continue;
 394
 395		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 396			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 397
 398		/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
 399		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 400		    !adev->vcn.using_unified_queue) {
 401			struct dpg_pause_state new_state;
 402
 403			if (fence[j] ||
 404				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 405				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 406			else
 407				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 408
 409			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 410		}
 411
 412		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 413		fences += fence[j];
 414	}
 415
 416	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 417		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 418		       AMD_PG_STATE_GATE);
 419		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 420				false);
 421		if (r)
 422			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 423	} else {
 424		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 425	}
 426}
 427
 428void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 429{
 430	struct amdgpu_device *adev = ring->adev;
 431	int r = 0;
 432
 433	atomic_inc(&adev->vcn.total_submission_cnt);
 434
 435	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 436		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 437				true);
 438		if (r)
 439			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 440	}
 441
 442	mutex_lock(&adev->vcn.vcn_pg_lock);
 443	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 444	       AMD_PG_STATE_UNGATE);
 445
 446	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
 447	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 448	    !adev->vcn.using_unified_queue) {
 449		struct dpg_pause_state new_state;
 450
 451		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 452			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 453			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 454		} else {
 455			unsigned int fences = 0;
 456			unsigned int i;
 457
 458			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 459				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 460
 461			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 462				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 463			else
 464				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 465		}
 466
 467		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 468	}
 469	mutex_unlock(&adev->vcn.vcn_pg_lock);
 470}
 471
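/**
 * amdgpu_vcn_ring_end_use - mark a VCN ring as idle after submission
 * @ring: the VCN ring that finished submitting
 *
 * Drop the counters taken in amdgpu_vcn_ring_begin_use() and re-arm the
 * idle work so the block can be power gated again once it drains.
 */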
 472void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 473{
 474	struct amdgpu_device *adev = ring->adev;
 475
 476	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
 477	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 478	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
 479	    !adev->vcn.using_unified_queue)
 480		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 481
 482	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 483
 484	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 485}
 486
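/**
 * amdgpu_vcn_dec_ring_test_ring - scratch register test on the decode ring
 * @ring: the decode ring to test
 *
 * Seed a scratch register, ask the ring to overwrite it with a token
 * and poll until the token shows up.  Skipped under SRIOV, where direct
 * register access is not supported.
 */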
 487int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 488{
 489	struct amdgpu_device *adev = ring->adev;
 490	uint32_t tmp = 0;
 491	unsigned int i;
 492	int r;
 493
 494	/* VCN in SRIOV does not support direct register read/write */
 495	if (amdgpu_sriov_vf(adev))
 496		return 0;
 497
 498	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 499	r = amdgpu_ring_alloc(ring, 3);
 500	if (r)
 501		return r;
 502	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 503	amdgpu_ring_write(ring, 0xDEADBEEF);
 504	amdgpu_ring_commit(ring);
 505	for (i = 0; i < adev->usec_timeout; i++) {
 506		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 507		if (tmp == 0xDEADBEEF)
 508			break;
 509		udelay(1);
 510	}
 511
 512	if (i >= adev->usec_timeout)
 513		r = -ETIMEDOUT;
 514
 515	return r;
 516}
 517
 518int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 519{
 520	struct amdgpu_device *adev = ring->adev;
 521	uint32_t rptr;
 522	unsigned int i;
 523	int r;
 524
 525	if (amdgpu_sriov_vf(adev))
 526		return 0;
 527
 528	r = amdgpu_ring_alloc(ring, 16);
 529	if (r)
 530		return r;
 531
 532	rptr = amdgpu_ring_get_rptr(ring);
 533
 534	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 535	amdgpu_ring_commit(ring);
 536
 537	for (i = 0; i < adev->usec_timeout; i++) {
 538		if (amdgpu_ring_get_rptr(ring) != rptr)
 539			break;
 540		udelay(1);
 541	}
 542
 543	if (i >= adev->usec_timeout)
 544		r = -ETIMEDOUT;
 545
 546	return r;
 547}
 548
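/*
 * Submit a small IB that points the decoder at the message buffer in
 * @ib_msg and hand the resulting fence back through @fence.  The
 * message IB is freed on both the success and the error path.
 */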
 549static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 550				   struct amdgpu_ib *ib_msg,
 551				   struct dma_fence **fence)
 552{
 553	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 554	struct amdgpu_device *adev = ring->adev;
 555	struct dma_fence *f = NULL;
 556	struct amdgpu_job *job;
 557	struct amdgpu_ib *ib;
 558	int i, r;
 559
 560	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 561				     64, AMDGPU_IB_POOL_DIRECT,
 562				     &job);
 563	if (r)
 564		goto err;
 565
 566	ib = &job->ibs[0];
 567	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 568	ib->ptr[1] = addr;
 569	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 570	ib->ptr[3] = addr >> 32;
 571	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 572	ib->ptr[5] = 0;
 573	for (i = 6; i < 16; i += 2) {
 574		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 575		ib->ptr[i+1] = 0;
 576	}
 577	ib->length_dw = 16;
 578
 579	r = amdgpu_job_submit_direct(job, ring, &f);
 580	if (r)
 581		goto err_free;
 582
 583	amdgpu_ib_free(adev, ib_msg, f);
 584
 585	if (fence)
 586		*fence = dma_fence_get(f);
 587	dma_fence_put(f);
 588
 589	return 0;
 590
 591err_free:
 592	amdgpu_job_free(job);
 593err:
 594	amdgpu_ib_free(adev, ib_msg, f);
 595	return r;
 596}
 597
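/*
 * Write a decoder "create session" message into a freshly allocated IB.
 * The body is the minimal create request traditionally used for ring
 * tests (handle plus what appears to be a 1920x1088 stream description).
 */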
 598static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 599		struct amdgpu_ib *ib)
 600{
 601	struct amdgpu_device *adev = ring->adev;
 602	uint32_t *msg;
 603	int r, i;
 604
 605	memset(ib, 0, sizeof(*ib));
 606	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 607			AMDGPU_IB_POOL_DIRECT,
 608			ib);
 609	if (r)
 610		return r;
 611
 612	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 613	msg[0] = cpu_to_le32(0x00000028);
 614	msg[1] = cpu_to_le32(0x00000038);
 615	msg[2] = cpu_to_le32(0x00000001);
 616	msg[3] = cpu_to_le32(0x00000000);
 617	msg[4] = cpu_to_le32(handle);
 618	msg[5] = cpu_to_le32(0x00000000);
 619	msg[6] = cpu_to_le32(0x00000001);
 620	msg[7] = cpu_to_le32(0x00000028);
 621	msg[8] = cpu_to_le32(0x00000010);
 622	msg[9] = cpu_to_le32(0x00000000);
 623	msg[10] = cpu_to_le32(0x00000007);
 624	msg[11] = cpu_to_le32(0x00000000);
 625	msg[12] = cpu_to_le32(0x00000780);
 626	msg[13] = cpu_to_le32(0x00000440);
 627	for (i = 14; i < 1024; ++i)
 628		msg[i] = cpu_to_le32(0x0);
 629
 630	return 0;
 631}
 632
 633static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 634					  struct amdgpu_ib *ib)
 635{
 636	struct amdgpu_device *adev = ring->adev;
 637	uint32_t *msg;
 638	int r, i;
 639
 640	memset(ib, 0, sizeof(*ib));
 641	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 642			AMDGPU_IB_POOL_DIRECT,
 643			ib);
 644	if (r)
 645		return r;
 646
 647	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 648	msg[0] = cpu_to_le32(0x00000028);
 649	msg[1] = cpu_to_le32(0x00000018);
 650	msg[2] = cpu_to_le32(0x00000000);
 651	msg[3] = cpu_to_le32(0x00000002);
 652	msg[4] = cpu_to_le32(handle);
 653	msg[5] = cpu_to_le32(0x00000000);
 654	for (i = 6; i < 1024; ++i)
 655		msg[i] = cpu_to_le32(0x0);
 656
 657	return 0;
 658}
 659
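/**
 * amdgpu_vcn_dec_ring_test_ib - IB test on the decode ring
 * @ring: the decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create message followed by a destroy message and wait for the
 * fence of the destroy submission to signal.
 */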
 660int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 661{
 662	struct dma_fence *fence = NULL;
 663	struct amdgpu_ib ib;
 664	long r;
 665
 666	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 667	if (r)
 668		goto error;
 669
 670	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 671	if (r)
 672		goto error;
 673	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 674	if (r)
 675		goto error;
 676
 677	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 678	if (r)
 679		goto error;
 680
 681	r = dma_fence_wait_timeout(fence, false, timeout);
 682	if (r == 0)
 683		r = -ETIMEDOUT;
 684	else if (r > 0)
 685		r = 0;
 686
 687	dma_fence_put(fence);
 688error:
 689	return r;
 690}
 691
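/*
 * Emit the unified queue header in front of an IB: a checksum packet
 * (the checksum value itself is patched in later, so its location is
 * returned) followed by an engine info packet selecting the encode or
 * decode engine.
 */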
 692static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 693						uint32_t ib_pack_in_dw, bool enc)
 694{
 695	uint32_t *ib_checksum;
 696
 697	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 698	ib->ptr[ib->length_dw++] = 0x30000002;
 699	ib_checksum = &ib->ptr[ib->length_dw++];
 700	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 701
 702	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 703	ib->ptr[ib->length_dw++] = 0x30000001;
 704	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 705	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 706
 707	return ib_checksum;
 708}
 709
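/*
 * Sum @ib_pack_in_dw dwords starting two dwords past the checksum slot
 * and patch the result into the slot reserved by
 * amdgpu_vcn_unified_ring_ib_header().
 */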
 710static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 711						uint32_t ib_pack_in_dw)
 712{
 713	uint32_t i;
 714	uint32_t checksum = 0;
 715
 716	for (i = 0; i < ib_pack_in_dw; i++)
 717		checksum += *(*ib_checksum + 2 + i);
 718
 719	**ib_checksum = checksum;
 720}
 721
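/*
 * Software-ring variant of amdgpu_vcn_dec_send_msg(): the message is
 * referenced through an amdgpu_vcn_decode_buffer packet and, on unified
 * queues, the IB is wrapped with the single-queue header and checksum.
 */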
 722static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 723				      struct amdgpu_ib *ib_msg,
 724				      struct dma_fence **fence)
 725{
 726	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 727	unsigned int ib_size_dw = 64;
 728	struct amdgpu_device *adev = ring->adev;
 729	struct dma_fence *f = NULL;
 730	struct amdgpu_job *job;
 731	struct amdgpu_ib *ib;
 732	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 733	uint32_t *ib_checksum;
 734	uint32_t ib_pack_in_dw;
 735	int i, r;
 736
 737	if (adev->vcn.using_unified_queue)
 738		ib_size_dw += 8;
 739
 740	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 741				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 742				     &job);
 743	if (r)
 744		goto err;
 745
 746	ib = &job->ibs[0];
 747	ib->length_dw = 0;
 748
 749	/* single queue headers */
 750	if (adev->vcn.using_unified_queue) {
 751		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 752						+ 4 + 2; /* engine info + decoding ib in dw */
 753		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 754	}
 755
 756	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 757	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 758	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 759	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 760	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 761
 762	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 763	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 764	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 765
 766	for (i = ib->length_dw; i < ib_size_dw; ++i)
 767		ib->ptr[i] = 0x0;
 768
 769	if (adev->vcn.using_unified_queue)
 770		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 771
 772	r = amdgpu_job_submit_direct(job, ring, &f);
 773	if (r)
 774		goto err_free;
 775
 776	amdgpu_ib_free(adev, ib_msg, f);
 777
 778	if (fence)
 779		*fence = dma_fence_get(f);
 780	dma_fence_put(f);
 781
 782	return 0;
 783
 784err_free:
 785	amdgpu_job_free(job);
 786err:
 787	amdgpu_ib_free(adev, ib_msg, f);
 788	return r;
 789}
 790
 791int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 792{
 793	struct dma_fence *fence = NULL;
 794	struct amdgpu_ib ib;
 795	long r;
 796
 797	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 798	if (r)
 799		goto error;
 800
 801	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 802	if (r)
 803		goto error;
 804	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 805	if (r)
 806		goto error;
 807
 808	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 809	if (r)
 810		goto error;
 811
 812	r = dma_fence_wait_timeout(fence, false, timeout);
 813	if (r == 0)
 814		r = -ETIMEDOUT;
 815	else if (r > 0)
 816		r = 0;
 817
 818	dma_fence_put(fence);
 819error:
 820	return r;
 821}
 822
 823int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 824{
 825	struct amdgpu_device *adev = ring->adev;
 826	uint32_t rptr;
 827	unsigned int i;
 828	int r;
 829
 830	if (amdgpu_sriov_vf(adev))
 831		return 0;
 832
 833	r = amdgpu_ring_alloc(ring, 16);
 834	if (r)
 835		return r;
 836
 837	rptr = amdgpu_ring_get_rptr(ring);
 838
 839	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 840	amdgpu_ring_commit(ring);
 841
 842	for (i = 0; i < adev->usec_timeout; i++) {
 843		if (amdgpu_ring_get_rptr(ring) != rptr)
 844			break;
 845		udelay(1);
 846	}
 847
 848	if (i >= adev->usec_timeout)
 849		r = -ETIMEDOUT;
 850
 851	return r;
 852}
 853
 854static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 855					 struct amdgpu_ib *ib_msg,
 856					 struct dma_fence **fence)
 857{
 858	unsigned int ib_size_dw = 16;
 859	struct amdgpu_device *adev = ring->adev;
 860	struct amdgpu_job *job;
 861	struct amdgpu_ib *ib;
 862	struct dma_fence *f = NULL;
 863	uint32_t *ib_checksum = NULL;
 864	uint64_t addr;
 865	int i, r;
 866
 867	if (adev->vcn.using_unified_queue)
 868		ib_size_dw += 8;
 869
 870	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 871				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 872				     &job);
 873	if (r)
 874		return r;
 875
 876	ib = &job->ibs[0];
 877	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 878
 879	ib->length_dw = 0;
 880
 881	if (adev->vcn.using_unified_queue)
 882		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 883
 884	ib->ptr[ib->length_dw++] = 0x00000018;
 885	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 886	ib->ptr[ib->length_dw++] = handle;
 887	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 888	ib->ptr[ib->length_dw++] = addr;
 889	ib->ptr[ib->length_dw++] = 0x00000000;
 890
 891	ib->ptr[ib->length_dw++] = 0x00000014;
 892	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 893	ib->ptr[ib->length_dw++] = 0x0000001c;
 894	ib->ptr[ib->length_dw++] = 0x00000000;
 895	ib->ptr[ib->length_dw++] = 0x00000000;
 896
 897	ib->ptr[ib->length_dw++] = 0x00000008;
 898	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 899
 900	for (i = ib->length_dw; i < ib_size_dw; ++i)
 901		ib->ptr[i] = 0x0;
 902
 903	if (adev->vcn.using_unified_queue)
 904		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 905
 906	r = amdgpu_job_submit_direct(job, ring, &f);
 907	if (r)
 908		goto err;
 909
 910	if (fence)
 911		*fence = dma_fence_get(f);
 912	dma_fence_put(f);
 913
 914	return 0;
 915
 916err:
 917	amdgpu_job_free(job);
 918	return r;
 919}
 920
 921static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 922					  struct amdgpu_ib *ib_msg,
 923					  struct dma_fence **fence)
 924{
 925	unsigned int ib_size_dw = 16;
 926	struct amdgpu_device *adev = ring->adev;
 927	struct amdgpu_job *job;
 928	struct amdgpu_ib *ib;
 929	struct dma_fence *f = NULL;
 930	uint32_t *ib_checksum = NULL;
 931	uint64_t addr;
 932	int i, r;
 933
 934	if (adev->vcn.using_unified_queue)
 935		ib_size_dw += 8;
 936
 937	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 938				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 939				     &job);
 940	if (r)
 941		return r;
 942
 943	ib = &job->ibs[0];
 944	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 945
 946	ib->length_dw = 0;
 947
 948	if (adev->vcn.using_unified_queue)
 949		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 950
 951	ib->ptr[ib->length_dw++] = 0x00000018;
 952	ib->ptr[ib->length_dw++] = 0x00000001;
 953	ib->ptr[ib->length_dw++] = handle;
 954	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 955	ib->ptr[ib->length_dw++] = addr;
 956	ib->ptr[ib->length_dw++] = 0x00000000;
 957
 958	ib->ptr[ib->length_dw++] = 0x00000014;
 959	ib->ptr[ib->length_dw++] = 0x00000002;
 960	ib->ptr[ib->length_dw++] = 0x0000001c;
 961	ib->ptr[ib->length_dw++] = 0x00000000;
 962	ib->ptr[ib->length_dw++] = 0x00000000;
 963
 964	ib->ptr[ib->length_dw++] = 0x00000008;
 965	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 966
 967	for (i = ib->length_dw; i < ib_size_dw; ++i)
 968		ib->ptr[i] = 0x0;
 969
 970	if (adev->vcn.using_unified_queue)
 971		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 972
 973	r = amdgpu_job_submit_direct(job, ring, &f);
 974	if (r)
 975		goto err;
 976
 977	if (fence)
 978		*fence = dma_fence_get(f);
 979	dma_fence_put(f);
 980
 981	return 0;
 982
 983err:
 984	amdgpu_job_free(job);
 985	return r;
 986}
 987
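/**
 * amdgpu_vcn_enc_ring_test_ib - IB test on the encode ring
 * @ring: the encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create session message followed by a destroy session message
 * and wait for the destroy fence to signal.
 */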
 988int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 989{
 990	struct amdgpu_device *adev = ring->adev;
 991	struct dma_fence *fence = NULL;
 992	struct amdgpu_ib ib;
 993	long r;
 994
 995	memset(&ib, 0, sizeof(ib));
 996	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
 997			AMDGPU_IB_POOL_DIRECT,
 998			&ib);
 999	if (r)
1000		return r;
1001
1002	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1003	if (r)
1004		goto error;
1005
1006	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1007	if (r)
1008		goto error;
1009
1010	r = dma_fence_wait_timeout(fence, false, timeout);
1011	if (r == 0)
1012		r = -ETIMEDOUT;
1013	else if (r > 0)
1014		r = 0;
1015
1016error:
1017	amdgpu_ib_free(adev, &ib, fence);
1018	dma_fence_put(fence);
1019
1020	return r;
1021}
1022
1023int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1024{
1025	struct amdgpu_device *adev = ring->adev;
1026	long r;
1027
1028	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
1029		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1030		if (r)
1031			goto error;
1032	}
1033
1034	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1035
1036error:
1037	return r;
1038}
1039
1040enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1041{
1042	switch (ring) {
1043	case 0:
1044		return AMDGPU_RING_PRIO_0;
1045	case 1:
1046		return AMDGPU_RING_PRIO_1;
1047	case 2:
1048		return AMDGPU_RING_PRIO_2;
1049	default:
1050		return AMDGPU_RING_PRIO_0;
1051	}
1052}
1053
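/**
 * amdgpu_vcn_setup_ucode - register VCN firmware for PSP loading
 * @adev: amdgpu_device pointer
 *
 * When firmware is loaded through the PSP, enter each VCN instance's
 * image into the ucode table and account for its size.
 */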
1054void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1055{
1056	int i;
1057	unsigned int idx;
1058
1059	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1060		const struct common_firmware_header *hdr;
1061
1062		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1063			if (adev->vcn.harvest_config & (1 << i))
1064				continue;
1065
1066			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
1067			/* currently only supports 2 FW instances */
1068			if (i >= 2) {
1069				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1070				break;
1071			}
1072			idx = AMDGPU_UCODE_ID_VCN + i;
1073			adev->firmware.ucode[idx].ucode_id = idx;
1074			adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
1075			adev->firmware.fw_size +=
1076				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1077
1078			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1079			    IP_VERSION(4, 0, 3))
1080				break;
1081		}
1082	}
1083}
1084
1085/*
1086 * debugfs interface for reading the VCN firmware log buffer.
1087 */
1088#if defined(CONFIG_DEBUG_FS)
1089static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1090					     size_t size, loff_t *pos)
1091{
1092	struct amdgpu_vcn_inst *vcn;
1093	void *log_buf;
1094	volatile struct amdgpu_vcn_fwlog *plog;
1095	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1096	unsigned int read_num[2] = {0};
1097
1098	vcn = file_inode(f)->i_private;
1099	if (!vcn)
1100		return -ENODEV;
1101
1102	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1103		return -EFAULT;
1104
1105	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1106
1107	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1108	read_pos = plog->rptr;
1109	write_pos = plog->wptr;
1110
1111	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1112		return -EFAULT;
1113
1114	if (!size || (read_pos == write_pos))
1115		return 0;
1116
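	/*
	 * The log is a ring buffer: if the writer has wrapped around,
	 * read in two chunks, skipping the header at the start of the
	 * buffer on the second chunk.
	 */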
1117	if (write_pos > read_pos) {
1118		available = write_pos - read_pos;
1119		read_num[0] = min_t(size_t, size, available);
1120	} else {
1121		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1122		available = read_num[0] + write_pos - plog->header_size;
1123		if (size > available)
1124			read_num[1] = write_pos - plog->header_size;
1125		else if (size > read_num[0])
1126			read_num[1] = size - read_num[0];
1127		else
1128			read_num[0] = size;
1129	}
1130
1131	for (i = 0; i < 2; i++) {
1132		if (read_num[i]) {
1133			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1134				read_pos = plog->header_size;
1135			if (copy_to_user(buf + read_bytes,
1136					 log_buf + read_pos, read_num[i]))
1137				return -EFAULT;
1138
1139			read_bytes += read_num[i];
1140			read_pos += read_num[i];
1141		}
1142	}
1143
1144	plog->rptr = read_pos;
1145	*pos += read_bytes;
1146	return read_bytes;
1147}
1148
1149static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1150	.owner = THIS_MODULE,
1151	.read = amdgpu_debugfs_vcn_fwlog_read,
1152	.llseek = default_llseek
1153};
1154#endif
1155
1156void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1157				   struct amdgpu_vcn_inst *vcn)
1158{
1159#if defined(CONFIG_DEBUG_FS)
1160	struct drm_minor *minor = adev_to_drm(adev)->primary;
1161	struct dentry *root = minor->debugfs_root;
1162	char name[32];
1163
1164	snprintf(name, sizeof(name), "amdgpu_vcn_%d_fwlog", i);
1165	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1166				 &amdgpu_debugfs_vcnfwlog_fops,
1167				 AMDGPU_VCNFW_LOG_SIZE);
1168#endif
1169}
1170
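/*
 * Enable firmware logging: the log ring buffer is placed directly after
 * the shared memory region, its GPU address is advertised through the
 * shared fw_logging structure, and the buffer header is initialized.
 */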
1171void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1172{
1173#if defined(CONFIG_DEBUG_FS)
1174	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1175	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1176	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1177	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1178	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1179							 + vcn->fw_shared.log_offset;
1180	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1181	fw_log->is_enabled = 1;
1182	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1183	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1184	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1185
1186	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1187	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1188	log_buf->rptr = log_buf->header_size;
1189	log_buf->wptr = log_buf->header_size;
1190	log_buf->wrapped = 0;
1191#endif
1192}
1193
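/**
 * amdgpu_vcn_process_poison_irq - process a VCN RAS poison interrupt
 * @adev: amdgpu_device pointer
 * @source: the interrupt source
 * @entry: the interrupt vector entry
 *
 * Dispatch the poison event to the RAS handler on bare metal; as an
 * SRIOV guest, forward it to the host's poison handler instead.
 */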
1194int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1195				struct amdgpu_irq_src *source,
1196				struct amdgpu_iv_entry *entry)
1197{
1198	struct ras_common_if *ras_if = adev->vcn.ras_if;
1199	struct ras_dispatch_if ih_data = {
1200		.entry = entry,
1201	};
1202
1203	if (!ras_if)
1204		return 0;
1205
1206	if (!amdgpu_sriov_vf(adev)) {
1207		ih_data.head = *ras_if;
1208		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1209	} else {
1210		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1211			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
1212		else
1213			dev_warn(adev->dev,
1214				"No ras_poison_handler interface in SRIOV for VCN!\n");
1215	}
1216
1217	return 0;
1218}
1219
1220int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1221{
1222	int r, i;
1223
1224	r = amdgpu_ras_block_late_init(adev, ras_block);
1225	if (r)
1226		return r;
1227
1228	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1229		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1230			if (adev->vcn.harvest_config & (1 << i) ||
1231			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1232				continue;
1233
1234			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1235			if (r)
1236				goto late_fini;
1237		}
1238	}
1239	return 0;
1240
1241late_fini:
1242	amdgpu_ras_block_late_fini(adev, ras_block);
1243	return r;
1244}
1245
1246int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1247{
1248	int err;
1249	struct amdgpu_vcn_ras *ras;
1250
1251	if (!adev->vcn.ras)
1252		return 0;
1253
1254	ras = adev->vcn.ras;
1255	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1256	if (err) {
1257		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1258		return err;
1259	}
1260
1261	strcpy(ras->ras_block.ras_comm.name, "vcn");
1262	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1263	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1264	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1265
1266	if (!ras->ras_block.ras_late_init)
1267		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1268
1269	return 0;
1270}
1271
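/**
 * amdgpu_vcn_psp_update_sram - load the DPG SRAM image through the PSP
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @ucode_id: ucode ID to use, or 0 to select the per-instance VCN RAM ID
 *
 * Hand the indirect register stream accumulated in the instance's DPG
 * SRAM buffer to the PSP for loading.
 */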
1272int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1273			       enum AMDGPU_UCODE_ID ucode_id)
1274{
1275	struct amdgpu_firmware_info ucode = {
1276		.ucode_id = (ucode_id ? ucode_id :
1277			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1278					AMDGPU_UCODE_ID_VCN0_RAM)),
1279		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1280		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1281			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1282	};
1283
1284	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1285}
1286
1287static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
1288						struct device_attribute *attr,
1289						char *buf)
1290{
1291	struct drm_device *ddev = dev_get_drvdata(dev);
1292	struct amdgpu_device *adev = drm_to_adev(ddev);
1293
1294	if (!adev)
1295		return -ENODEV;
1296
1297	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
1298}
1299
1300static DEVICE_ATTR(vcn_reset_mask, 0444,
1301		   amdgpu_get_vcn_reset_mask, NULL);
1302
1303int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
1304{
1305	int r = 0;
1306
1307	if (adev->vcn.num_vcn_inst) {
1308		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
1309		if (r)
1310			return r;
1311	}
1312
1313	return r;
1314}
1315
1316void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1317{
1318	if (adev->dev->kobj.sd) {
1319		if (adev->vcn.num_vcn_inst)
1320			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
1321	}
1322}