amdgpu_vcn.c (v6.2)
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <linux/debugfs.h>
  31#include <drm/drm_drv.h>
  32
  33#include "amdgpu.h"
  34#include "amdgpu_pm.h"
  35#include "amdgpu_vcn.h"
  36#include "soc15d.h"
  37
  38/* Firmware Names */
  39#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
  40#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
  41#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
  42#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
  43#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
  44#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
  45#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
  46#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
  47#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
  48#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
  49#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
  50#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
  51#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  52#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
  53#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
  54#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
  55#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
  56#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
  57#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
  58#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
  59
  60MODULE_FIRMWARE(FIRMWARE_RAVEN);
  61MODULE_FIRMWARE(FIRMWARE_PICASSO);
  62MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  63MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  64MODULE_FIRMWARE(FIRMWARE_RENOIR);
  65MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  66MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  67MODULE_FIRMWARE(FIRMWARE_NAVI10);
  68MODULE_FIRMWARE(FIRMWARE_NAVI14);
  69MODULE_FIRMWARE(FIRMWARE_NAVI12);
  70MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  71MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  72MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  73MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  74MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  75MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  76MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  77MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  78MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  79MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  80
  81static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  82
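    /*
     * amdgpu_vcn_sw_init - one-time software setup for all VCN instances.
     *
     * Summary of the flow below: pick the firmware image from the UVD/VCN
     * IP version (most cases also enable indirect SRAM when the load type
     * is PSP and the part supports VCN DPG), request and validate the
     * firmware, size the VCPU BO (stack + context, plus the firmware image
     * for non-PSP loads, plus shared memory and an optional firmware log),
     * then allocate one VCPU BO per non-harvested instance.
     */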
  83int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
  84{
  85	unsigned long bo_size;
  86	const char *fw_name;
  87	const struct common_firmware_header *hdr;
  88	unsigned char fw_check;
  89	unsigned int fw_shared_size, log_offset;
  90	int i, r;
  91
  92	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
  93	mutex_init(&adev->vcn.vcn_pg_lock);
  94	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
  95	atomic_set(&adev->vcn.total_submission_cnt, 0);
  96	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
  97		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
  98
  99	switch (adev->ip_versions[UVD_HWIP][0]) {
 100	case IP_VERSION(1, 0, 0):
 101	case IP_VERSION(1, 0, 1):
 102		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 103			fw_name = FIRMWARE_RAVEN2;
 104		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
 105			fw_name = FIRMWARE_PICASSO;
 106		else
 107			fw_name = FIRMWARE_RAVEN;
 108		break;
 109	case IP_VERSION(2, 5, 0):
 110		fw_name = FIRMWARE_ARCTURUS;
 111		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 112		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 113			adev->vcn.indirect_sram = true;
 114		break;
 115	case IP_VERSION(2, 2, 0):
 116		if (adev->apu_flags & AMD_APU_IS_RENOIR)
 117			fw_name = FIRMWARE_RENOIR;
 118		else
 119			fw_name = FIRMWARE_GREEN_SARDINE;
 120
 121		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 122		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 123			adev->vcn.indirect_sram = true;
 124		break;
 125	case IP_VERSION(2, 6, 0):
 126		fw_name = FIRMWARE_ALDEBARAN;
 127		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 128		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 129			adev->vcn.indirect_sram = true;
 130		break;
 131	case IP_VERSION(2, 0, 0):
 132		fw_name = FIRMWARE_NAVI10;
 133		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 134		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 135			adev->vcn.indirect_sram = true;
 136		break;
 137	case IP_VERSION(2, 0, 2):
 138		if (adev->asic_type == CHIP_NAVI12)
 139			fw_name = FIRMWARE_NAVI12;
 140		else
 141			fw_name = FIRMWARE_NAVI14;
 142		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 143		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 144			adev->vcn.indirect_sram = true;
 145		break;
 146	case IP_VERSION(3, 0, 0):
 147	case IP_VERSION(3, 0, 64):
 148	case IP_VERSION(3, 0, 192):
 149		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 150			fw_name = FIRMWARE_SIENNA_CICHLID;
 151		else
 152			fw_name = FIRMWARE_NAVY_FLOUNDER;
 153		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 154		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 155			adev->vcn.indirect_sram = true;
 156		break;
 157	case IP_VERSION(3, 0, 2):
 158		fw_name = FIRMWARE_VANGOGH;
 159		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 160		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 161			adev->vcn.indirect_sram = true;
 162		break;
 163	case IP_VERSION(3, 0, 16):
 164		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
 165		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 166		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 167			adev->vcn.indirect_sram = true;
 168		break;
 169	case IP_VERSION(3, 0, 33):
 170		fw_name = FIRMWARE_BEIGE_GOBY;
 171		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 172		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 173			adev->vcn.indirect_sram = true;
 174		break;
 175	case IP_VERSION(3, 1, 1):
 176		fw_name = FIRMWARE_YELLOW_CARP;
 177		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 178		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 179			adev->vcn.indirect_sram = true;
 180		break;
 181	case IP_VERSION(3, 1, 2):
 182		fw_name = FIRMWARE_VCN_3_1_2;
 183		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 184		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 185			adev->vcn.indirect_sram = true;
 186		break;
 187	case IP_VERSION(4, 0, 0):
 188		fw_name = FIRMWARE_VCN4_0_0;
 189		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 190			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 191			adev->vcn.indirect_sram = true;
 192		break;
 193	case IP_VERSION(4, 0, 2):
 194		fw_name = FIRMWARE_VCN4_0_2;
 195		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 196			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 197			adev->vcn.indirect_sram = true;
 198		break;
 199	case IP_VERSION(4, 0, 4):
 200		fw_name = FIRMWARE_VCN4_0_4;
 201		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 202			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 203			adev->vcn.indirect_sram = true;
 204		break;
 205	default:
 206		return -EINVAL;
 207	}
 208
 209	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
 210	if (r) {
 211		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
 212			fw_name);
 213		return r;
 214	}
 215
 216	r = amdgpu_ucode_validate(adev->vcn.fw);
 217	if (r) {
 218		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
 219			fw_name);
 220		release_firmware(adev->vcn.fw);
 221		adev->vcn.fw = NULL;
 222		return r;
 223	}
 224
 225	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 226	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 227
 228	/* Bit 20-23, it is encode major and non-zero for new naming convention.
 229	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
 230	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
 231	 * is zero in old naming convention, this field is always zero so far.
 232	 * These four bits are used to tell which naming convention is present.
 233	 */
 234	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 235	if (fw_check) {
 236		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 237
 238		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 239		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 240		enc_major = fw_check;
 241		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 242		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 243		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 244			enc_major, enc_minor, dec_ver, vep, fw_rev);
 245	} else {
 246		unsigned int version_major, version_minor, family_id;
 247
 248		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 249		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 250		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 251		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 252			version_major, version_minor, family_id);
 253	}
 254
 255	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 256	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 257		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 258
 259	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
 260		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 261		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 262	} else {
 263		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 264		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 265	}
 266
 267	bo_size += fw_shared_size;
 268
 269	if (amdgpu_vcnfw_log)
 270		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 271
 272	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 273		if (adev->vcn.harvest_config & (1 << i))
 274			continue;
 275
 276		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 277						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
 278						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
 279		if (r) {
 280			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 281			return r;
 282		}
 283
 284		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 285				bo_size - fw_shared_size;
 286		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 287				bo_size - fw_shared_size;
 288
 289		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 290
 291		if (amdgpu_vcnfw_log) {
 292			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 293			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 294			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 295		}
 296
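    		/* 64 * 2 * 4 bytes: presumably room for 64 register/value
    		 * dword pairs programmed through the DPG indirect SRAM path
    		 * (an editorial reading of the size, not stated in the code).
    		 */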
 297		if (adev->vcn.indirect_sram) {
 298			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 299					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
 300					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
 301			if (r) {
 302				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 303				return r;
 304			}
 305		}
 306	}
 307
 308	return 0;
 309}
 310
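    /*
     * amdgpu_vcn_sw_fini - tear-down mirror of amdgpu_vcn_sw_init: frees
     * each instance's DPG SRAM BO, suspend snapshot, and VCPU BO, finalizes
     * the rings, then releases the firmware and destroys the mutexes.
     */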
 311int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 312{
 313	int i, j;
 314
 315	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 316		if (adev->vcn.harvest_config & (1 << j))
 317			continue;
 318
 319		if (adev->vcn.indirect_sram) {
 320			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
 321						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
 322						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 323		}
 324		kvfree(adev->vcn.inst[j].saved_bo);
 325
 326		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 327					  &adev->vcn.inst[j].gpu_addr,
 328					  (void **)&adev->vcn.inst[j].cpu_addr);
 329
 330		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 331
 332		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 333			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 334	}
 335
 336	release_firmware(adev->vcn.fw);
 337	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 338	mutex_destroy(&adev->vcn.vcn_pg_lock);
 339
 340	return 0;
 341}
 342
 343/* from vcn4 and above, only unified queue is used */
 344static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
 345{
 346	struct amdgpu_device *adev = ring->adev;
 347	bool ret = false;
 348
 349	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
 350		ret = true;
 351
 352	return ret;
 353}
 354
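    /*
     * amdgpu_vcn_is_disabled_vcn - true when the given ring type is fused
     * off for this instance, per the instance's vcn_config disable bits.
     */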
 355bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 356{
 357	bool ret = false;
 358	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 359
 360	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
 361		ret = true;
 362	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
 363		ret = true;
 364	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
 365		ret = true;
 366	}
 367
 368	return ret;
 369}
 370
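    /*
     * amdgpu_vcn_suspend - stop the idle worker and snapshot each
     * instance's VCPU BO into system memory (saved_bo) so resume can
     * restore it; drm_dev_enter() guards the VRAM access against device
     * unplug.
     */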
 371int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 372{
 373	unsigned size;
 374	void *ptr;
 375	int i, idx;
 376
 377	cancel_delayed_work_sync(&adev->vcn.idle_work);
 378
 379	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 380		if (adev->vcn.harvest_config & (1 << i))
 381			continue;
 382		if (adev->vcn.inst[i].vcpu_bo == NULL)
 383			return 0;
 384
 385		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 386		ptr = adev->vcn.inst[i].cpu_addr;
 387
 388		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 389		if (!adev->vcn.inst[i].saved_bo)
 390			return -ENOMEM;
 391
 392		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 393			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 394			drm_dev_exit(idx);
 395		}
 396	}
 397	return 0;
 398}
 399
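    /*
     * amdgpu_vcn_resume - restore each VCPU BO: from the suspend snapshot
     * when one exists, otherwise re-copy the firmware image (non-PSP loads
     * only) and zero the remainder.
     */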
 400int amdgpu_vcn_resume(struct amdgpu_device *adev)
 401{
 402	unsigned size;
 403	void *ptr;
 404	int i, idx;
 405
 406	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 407		if (adev->vcn.harvest_config & (1 << i))
 408			continue;
 409		if (adev->vcn.inst[i].vcpu_bo == NULL)
 410			return -EINVAL;
 411
 412		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 413		ptr = adev->vcn.inst[i].cpu_addr;
 414
 415		if (adev->vcn.inst[i].saved_bo != NULL) {
 416			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 417				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 418				drm_dev_exit(idx);
 419			}
 420			kvfree(adev->vcn.inst[i].saved_bo);
 421			adev->vcn.inst[i].saved_bo = NULL;
 422		} else {
 423			const struct common_firmware_header *hdr;
 424			unsigned offset;
 425
 426			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 427			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 428				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 429				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 430					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
 431						    le32_to_cpu(hdr->ucode_size_bytes));
 432					drm_dev_exit(idx);
 433				}
 434				size -= le32_to_cpu(hdr->ucode_size_bytes);
 435				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 436			}
 437			memset_io(ptr, 0, size);
 438		}
 439	}
 440	return 0;
 441}
 442
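    /*
     * Idle worker: counts fences still emitted on every decode and encode
     * ring. When everything is quiescent it gates the VCN block and drops
     * the VIDEO power profile; otherwise it re-arms itself after
     * VCN_IDLE_TIMEOUT. DPG-capable parts also get a per-instance
     * pause/unpause hint based on outstanding encode work.
     */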
 443static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 444{
 445	struct amdgpu_device *adev =
 446		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 447	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 448	unsigned int i, j;
 449	int r = 0;
 450
 451	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 452		if (adev->vcn.harvest_config & (1 << j))
 453			continue;
 454
 455		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 456			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 457		}
 458
 459	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 460			struct dpg_pause_state new_state;
 461
 462			if (fence[j] ||
 463				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 464				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 465			else
 466				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 467
 468			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 469		}
 470
 471		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 472		fences += fence[j];
 473	}
 474
 475	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 476		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 477		       AMD_PG_STATE_GATE);
 478		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 479				false);
 480		if (r)
 481			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 482	} else {
 483		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 484	}
 485}
 486
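    /*
     * amdgpu_vcn_ring_begin_use - per-submission power ramp-up: bump the
     * submission count, re-enable the VIDEO power profile when no idle
     * work was pending (i.e. the idle handler may already have run),
     * ungate VCN, and (for DPG) pick a pause state from the encode load.
     */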
 487void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 488{
 489	struct amdgpu_device *adev = ring->adev;
 490	int r = 0;
 491
 492	atomic_inc(&adev->vcn.total_submission_cnt);
 493
 494	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 495		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 496				true);
 497		if (r)
 498			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 499	}
 500
 501	mutex_lock(&adev->vcn.vcn_pg_lock);
 502	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 503	       AMD_PG_STATE_UNGATE);
 504
 505	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 506		struct dpg_pause_state new_state;
 507
 508		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 509			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 510			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 511		} else {
 512			unsigned int fences = 0;
 513			unsigned int i;
 514
 515			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 516				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 517
 518			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 519				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 520			else
 521				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 522		}
 523
 524		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 525	}
 526	mutex_unlock(&adev->vcn.vcn_pg_lock);
 527}
 528
 529void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 530{
 531	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 532		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 533		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 534
 535	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 536
 537	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 538}
 539
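    /*
     * Decode ring smoke test: seed scratch9 with 0xCAFEDEAD, submit a
     * PACKET0 write of 0xDEADBEEF, and poll until the register reads back
     * the new value or usec_timeout expires.
     */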
 540int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 541{
 542	struct amdgpu_device *adev = ring->adev;
 543	uint32_t tmp = 0;
 544	unsigned i;
 545	int r;
 546
 547	/* VCN in SRIOV does not support direct register read/write */
 548	if (amdgpu_sriov_vf(adev))
 549		return 0;
 550
 551	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 552	r = amdgpu_ring_alloc(ring, 3);
 553	if (r)
 554		return r;
 555	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 556	amdgpu_ring_write(ring, 0xDEADBEEF);
 557	amdgpu_ring_commit(ring);
 558	for (i = 0; i < adev->usec_timeout; i++) {
 559		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 560		if (tmp == 0xDEADBEEF)
 561			break;
 562		udelay(1);
 563	}
 564
 565	if (i >= adev->usec_timeout)
 566		r = -ETIMEDOUT;
 567
 568	return r;
 569}
 570
 571int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 572{
 573	struct amdgpu_device *adev = ring->adev;
 574	uint32_t rptr;
 575	unsigned int i;
 576	int r;
 577
 578	if (amdgpu_sriov_vf(adev))
 579		return 0;
 580
 581	r = amdgpu_ring_alloc(ring, 16);
 582	if (r)
 583		return r;
 584
 585	rptr = amdgpu_ring_get_rptr(ring);
 586
 587	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 588	amdgpu_ring_commit(ring);
 589
 590	for (i = 0; i < adev->usec_timeout; i++) {
 591		if (amdgpu_ring_get_rptr(ring) != rptr)
 592			break;
 593		udelay(1);
 594	}
 595
 596	if (i >= adev->usec_timeout)
 597		r = -ETIMEDOUT;
 598
 599	return r;
 600}
 601
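    /*
     * amdgpu_vcn_dec_send_msg - wrap a decoder message buffer in a
     * 16-dword PACKET0 IB (data0/data1 carry the 64-bit GPU address of the
     * message, the rest is NOP padding), submit it directly, and free the
     * message IB once a fence is available.
     */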
 602static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 603				   struct amdgpu_ib *ib_msg,
 604				   struct dma_fence **fence)
 605{
 606	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 607	struct amdgpu_device *adev = ring->adev;
 608	struct dma_fence *f = NULL;
 609	struct amdgpu_job *job;
 610	struct amdgpu_ib *ib;
 611	int i, r;
 612
 613	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 614				     64, AMDGPU_IB_POOL_DIRECT,
 615				     &job);
 616	if (r)
 617		goto err;
 618
 619	ib = &job->ibs[0];
 620	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 621	ib->ptr[1] = addr;
 622	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 623	ib->ptr[3] = addr >> 32;
 624	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 625	ib->ptr[5] = 0;
 626	for (i = 6; i < 16; i += 2) {
 627		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 628		ib->ptr[i+1] = 0;
 629	}
 630	ib->length_dw = 16;
 631
 632	r = amdgpu_job_submit_direct(job, ring, &f);
 633	if (r)
 634		goto err_free;
 635
 636	amdgpu_ib_free(adev, ib_msg, f);
 637
 638	if (fence)
 639		*fence = dma_fence_get(f);
 640	dma_fence_put(f);
 641
 642	return 0;
 643
 644err_free:
 645	amdgpu_job_free(job);
 646err:
 647	amdgpu_ib_free(adev, ib_msg, f);
 648	return r;
 649}
 650
 651static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 652		struct amdgpu_ib *ib)
 653{
 654	struct amdgpu_device *adev = ring->adev;
 655	uint32_t *msg;
 656	int r, i;
 657
 658	memset(ib, 0, sizeof(*ib));
 659	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 660			AMDGPU_IB_POOL_DIRECT,
 661			ib);
 662	if (r)
 663		return r;
 664
 665	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 666	msg[0] = cpu_to_le32(0x00000028);
 667	msg[1] = cpu_to_le32(0x00000038);
 668	msg[2] = cpu_to_le32(0x00000001);
 669	msg[3] = cpu_to_le32(0x00000000);
 670	msg[4] = cpu_to_le32(handle);
 671	msg[5] = cpu_to_le32(0x00000000);
 672	msg[6] = cpu_to_le32(0x00000001);
 673	msg[7] = cpu_to_le32(0x00000028);
 674	msg[8] = cpu_to_le32(0x00000010);
 675	msg[9] = cpu_to_le32(0x00000000);
 676	msg[10] = cpu_to_le32(0x00000007);
 677	msg[11] = cpu_to_le32(0x00000000);
 678	msg[12] = cpu_to_le32(0x00000780);
 679	msg[13] = cpu_to_le32(0x00000440);
 680	for (i = 14; i < 1024; ++i)
 681		msg[i] = cpu_to_le32(0x0);
 682
 683	return 0;
 684}
 685
 686static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 687					  struct amdgpu_ib *ib)
 688{
 689	struct amdgpu_device *adev = ring->adev;
 690	uint32_t *msg;
 691	int r, i;
 692
 693	memset(ib, 0, sizeof(*ib));
 694	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 695			AMDGPU_IB_POOL_DIRECT,
 696			ib);
 697	if (r)
 698		return r;
 699
 700	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 701	msg[0] = cpu_to_le32(0x00000028);
 702	msg[1] = cpu_to_le32(0x00000018);
 703	msg[2] = cpu_to_le32(0x00000000);
 704	msg[3] = cpu_to_le32(0x00000002);
 705	msg[4] = cpu_to_le32(handle);
 706	msg[5] = cpu_to_le32(0x00000000);
 707	for (i = 6; i < 1024; ++i)
 708		msg[i] = cpu_to_le32(0x0);
 709
 710	return 0;
 711}
 712
 713int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 714{
 715	struct dma_fence *fence = NULL;
 716	struct amdgpu_ib ib;
 717	long r;
 718
 719	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 720	if (r)
 721		goto error;
 722
 723	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 724	if (r)
 725		goto error;
 726	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 727	if (r)
 728		goto error;
 729
 730	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 731	if (r)
 732		goto error;
 733
 734	r = dma_fence_wait_timeout(fence, false, timeout);
 735	if (r == 0)
 736		r = -ETIMEDOUT;
 737	else if (r > 0)
 738		r = 0;
 739
 740	dma_fence_put(fence);
 741error:
 742	return r;
 743}
 744
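    /*
     * Unified-queue IBs are prefixed with two 4-dword packets: a "single
     * queue checksum" packet (0x30000002) whose checksum dword is only
     * reserved here and filled in later, and an "engine info" packet
     * (0x30000001) selecting encode (0x2) or decode (0x3). The checksum
     * computed below is a plain 32-bit sum over ib_pack_in_dw dwords
     * starting two slots past the reserved checksum dword.
     */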
 745static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 746						uint32_t ib_pack_in_dw, bool enc)
 747{
 748	uint32_t *ib_checksum;
 749
 750	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 751	ib->ptr[ib->length_dw++] = 0x30000002;
 752	ib_checksum = &ib->ptr[ib->length_dw++];
 753	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 754
 755	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 756	ib->ptr[ib->length_dw++] = 0x30000001;
 757	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 758	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 759
 760	return ib_checksum;
 761}
 762
 763static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 764						uint32_t ib_pack_in_dw)
 765{
 766	uint32_t i;
 767	uint32_t checksum = 0;
 768
 769	for (i = 0; i < ib_pack_in_dw; i++)
 770		checksum += *(*ib_checksum + 2 + i);
 771
 772	**ib_checksum = checksum;
 773}
 774
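    /*
     * IB prefix produced by the header/checksum helpers above, as used by
     * the submit paths below:
     *   dw0 0x00000010  dw1 0x30000002  dw2 <checksum>       dw3 <payload dw count>
     *   dw4 0x00000010  dw5 0x30000001  dw6 0x2 enc / 0x3 dec  dw7 <payload bytes>
     */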
 775static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 776				      struct amdgpu_ib *ib_msg,
 777				      struct dma_fence **fence)
 778{
 779	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 780	unsigned int ib_size_dw = 64;
 781	struct amdgpu_device *adev = ring->adev;
 782	struct dma_fence *f = NULL;
 783	struct amdgpu_job *job;
 784	struct amdgpu_ib *ib;
 785	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 786	bool sq = amdgpu_vcn_using_unified_queue(ring);
 787	uint32_t *ib_checksum;
 788	uint32_t ib_pack_in_dw;
 789	int i, r;
 790
 791	if (sq)
 792		ib_size_dw += 8;
 793
 794	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 795				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 796				     &job);
 797	if (r)
 798		goto err;
 799
 800	ib = &job->ibs[0];
 801	ib->length_dw = 0;
 802
 803	/* single queue headers */
 804	if (sq) {
 805		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 806						+ 4 + 2; /* engine info + decoding ib in dw */
 807		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 808	}
 809
 810	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 811	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 812	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 813	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 814	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 815
 816	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 817	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 818	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 819
 820	for (i = ib->length_dw; i < ib_size_dw; ++i)
 821		ib->ptr[i] = 0x0;
 822
 823	if (sq)
 824		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 825
 826	r = amdgpu_job_submit_direct(job, ring, &f);
 827	if (r)
 828		goto err_free;
 829
 830	amdgpu_ib_free(adev, ib_msg, f);
 831
 832	if (fence)
 833		*fence = dma_fence_get(f);
 834	dma_fence_put(f);
 835
 836	return 0;
 837
 838err_free:
 839	amdgpu_job_free(job);
 840err:
 841	amdgpu_ib_free(adev, ib_msg, f);
 842	return r;
 843}
 844
 845int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 846{
 847	struct dma_fence *fence = NULL;
 848	struct amdgpu_ib ib;
 849	long r;
 850
 851	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 852	if (r)
 853		goto error;
 854
 855	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 856	if (r)
 857		goto error;
 858	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 859	if (r)
 860		goto error;
 861
 862	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 863	if (r)
 864		goto error;
 865
 866	r = dma_fence_wait_timeout(fence, false, timeout);
 867	if (r == 0)
 868		r = -ETIMEDOUT;
 869	else if (r > 0)
 870		r = 0;
 871
 872	dma_fence_put(fence);
 873error:
 874	return r;
 875}
 876
 877int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 878{
 879	struct amdgpu_device *adev = ring->adev;
 880	uint32_t rptr;
 881	unsigned i;
 882	int r;
 883
 884	if (amdgpu_sriov_vf(adev))
 885		return 0;
 886
 887	r = amdgpu_ring_alloc(ring, 16);
 888	if (r)
 889		return r;
 890
 891	rptr = amdgpu_ring_get_rptr(ring);
 892
 893	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 894	amdgpu_ring_commit(ring);
 895
 896	for (i = 0; i < adev->usec_timeout; i++) {
 897		if (amdgpu_ring_get_rptr(ring) != rptr)
 898			break;
 899		udelay(1);
 900	}
 901
 902	if (i >= adev->usec_timeout)
 903		r = -ETIMEDOUT;
 904
 905	return r;
 906}
 907
 908static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 909					 struct amdgpu_ib *ib_msg,
 910					 struct dma_fence **fence)
 911{
 912	unsigned int ib_size_dw = 16;
 913	struct amdgpu_job *job;
 914	struct amdgpu_ib *ib;
 915	struct dma_fence *f = NULL;
 916	uint32_t *ib_checksum = NULL;
 917	uint64_t addr;
 918	bool sq = amdgpu_vcn_using_unified_queue(ring);
 919	int i, r;
 920
 921	if (sq)
 922		ib_size_dw += 8;
 923
 924	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 925				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 926				     &job);
 927	if (r)
 928		return r;
 929
 930	ib = &job->ibs[0];
 931	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 932
 933	ib->length_dw = 0;
 934
 935	if (sq)
 936		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 937
 938	ib->ptr[ib->length_dw++] = 0x00000018;
 939	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 940	ib->ptr[ib->length_dw++] = handle;
 941	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 942	ib->ptr[ib->length_dw++] = addr;
 943	ib->ptr[ib->length_dw++] = 0x0000000b;
 944
 945	ib->ptr[ib->length_dw++] = 0x00000014;
 946	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 947	ib->ptr[ib->length_dw++] = 0x0000001c;
 948	ib->ptr[ib->length_dw++] = 0x00000000;
 949	ib->ptr[ib->length_dw++] = 0x00000000;
 950
 951	ib->ptr[ib->length_dw++] = 0x00000008;
 952	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 953
 954	for (i = ib->length_dw; i < ib_size_dw; ++i)
 955		ib->ptr[i] = 0x0;
 956
 957	if (sq)
 958		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 959
 960	r = amdgpu_job_submit_direct(job, ring, &f);
 961	if (r)
 962		goto err;
 963
 964	if (fence)
 965		*fence = dma_fence_get(f);
 966	dma_fence_put(f);
 967
 968	return 0;
 969
 970err:
 971	amdgpu_job_free(job);
 972	return r;
 973}
 974
 975static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 976					  struct amdgpu_ib *ib_msg,
 977					  struct dma_fence **fence)
 978{
 979	unsigned int ib_size_dw = 16;
 980	struct amdgpu_job *job;
 981	struct amdgpu_ib *ib;
 982	struct dma_fence *f = NULL;
 983	uint32_t *ib_checksum = NULL;
 984	uint64_t addr;
 985	bool sq = amdgpu_vcn_using_unified_queue(ring);
 986	int i, r;
 987
 988	if (sq)
 989		ib_size_dw += 8;
 990
 991	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 992				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 993				     &job);
 994	if (r)
 995		return r;
 996
 997	ib = &job->ibs[0];
 998	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 999
1000	ib->length_dw = 0;
1001
1002	if (sq)
1003		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
1004
1005	ib->ptr[ib->length_dw++] = 0x00000018;
1006	ib->ptr[ib->length_dw++] = 0x00000001;
1007	ib->ptr[ib->length_dw++] = handle;
1008	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1009	ib->ptr[ib->length_dw++] = addr;
1010	ib->ptr[ib->length_dw++] = 0x0000000b;
1011
1012	ib->ptr[ib->length_dw++] = 0x00000014;
1013	ib->ptr[ib->length_dw++] = 0x00000002;
1014	ib->ptr[ib->length_dw++] = 0x0000001c;
1015	ib->ptr[ib->length_dw++] = 0x00000000;
1016	ib->ptr[ib->length_dw++] = 0x00000000;
1017
1018	ib->ptr[ib->length_dw++] = 0x00000008;
1019	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
1020
1021	for (i = ib->length_dw; i < ib_size_dw; ++i)
1022		ib->ptr[i] = 0x0;
1023
1024	if (sq)
1025		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
1026
1027	r = amdgpu_job_submit_direct(job, ring, &f);
1028	if (r)
1029		goto err;
1030
1031	if (fence)
1032		*fence = dma_fence_get(f);
1033	dma_fence_put(f);
1034
1035	return 0;
1036
1037err:
1038	amdgpu_job_free(job);
1039	return r;
1040}
1041
1042int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1043{
1044	struct amdgpu_device *adev = ring->adev;
1045	struct dma_fence *fence = NULL;
1046	struct amdgpu_ib ib;
1047	long r;
1048
1049	memset(&ib, 0, sizeof(ib));
1050	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1051			AMDGPU_IB_POOL_DIRECT,
1052			&ib);
1053	if (r)
1054		return r;
1055
1056	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1057	if (r)
1058		goto error;
1059
1060	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1061	if (r)
1062		goto error;
1063
1064	r = dma_fence_wait_timeout(fence, false, timeout);
1065	if (r == 0)
1066		r = -ETIMEDOUT;
1067	else if (r > 0)
1068		r = 0;
1069
1070error:
1071	amdgpu_ib_free(adev, &ib, fence);
1072	dma_fence_put(fence);
1073
1074	return r;
1075}
1076
1077int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1078{
1079	long r;
1080
1081	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1082	if (r)
1083		goto error;
1084
1085	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1086
1087error:
1088	return r;
1089}
1090
1091enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1092{
1093	switch(ring) {
1094	case 0:
1095		return AMDGPU_RING_PRIO_0;
1096	case 1:
1097		return AMDGPU_RING_PRIO_1;
1098	case 2:
1099		return AMDGPU_RING_PRIO_2;
1100	default:
1101		return AMDGPU_RING_PRIO_0;
1102	}
1103}
1104
1105void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1106{
1107	int i;
1108	unsigned int idx;
1109
1110	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1111		const struct common_firmware_header *hdr;
1112		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1113
1114		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1115			if (adev->vcn.harvest_config & (1 << i))
1116				continue;
1117			/* currently only support 2 FW instances */
1118			if (i >= 2) {
1119				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1120				break;
1121			}
1122			idx = AMDGPU_UCODE_ID_VCN + i;
1123			adev->firmware.ucode[idx].ucode_id = idx;
1124			adev->firmware.ucode[idx].fw = adev->vcn.fw;
1125			adev->firmware.fw_size +=
1126				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1127		}
1128		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1129	}
1130}
1131
1132/*
1133 * debugfs for mapping vcn firmware log buffer.
1134 */
1135#if defined(CONFIG_DEBUG_FS)
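    /*
     * The firmware log lives past the shared-memory block in the VCPU BO
     * and is consumed as a ring buffer: rptr/wptr index into the buffer,
     * reads wrap back to just after the header, and up to two chunks are
     * copied to userspace before rptr is advanced.
     */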
1136static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1137                                             size_t size, loff_t *pos)
1138{
1139	struct amdgpu_vcn_inst *vcn;
1140	void *log_buf;
1141	volatile struct amdgpu_vcn_fwlog *plog;
1142	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1143	unsigned int read_num[2] = {0};
1144
1145	vcn = file_inode(f)->i_private;
1146	if (!vcn)
1147		return -ENODEV;
1148
1149	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1150		return -EFAULT;
1151
1152	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1153
1154	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1155	read_pos = plog->rptr;
1156	write_pos = plog->wptr;
1157
1158	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1159		return -EFAULT;
1160
1161	if (!size || (read_pos == write_pos))
1162		return 0;
1163
1164	if (write_pos > read_pos) {
1165		available = write_pos - read_pos;
1166		read_num[0] = min(size, (size_t)available);
1167	} else {
1168		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1169		available = read_num[0] + write_pos - plog->header_size;
1170		if (size > available)
1171			read_num[1] = write_pos - plog->header_size;
1172		else if (size > read_num[0])
1173			read_num[1] = size - read_num[0];
1174		else
1175			read_num[0] = size;
1176	}
1177
1178	for (i = 0; i < 2; i++) {
1179		if (read_num[i]) {
1180			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1181				read_pos = plog->header_size;
1182			if (read_num[i] == copy_to_user((buf + read_bytes),
1183			                                (log_buf + read_pos), read_num[i]))
1184				return -EFAULT;
1185
1186			read_bytes += read_num[i];
1187			read_pos += read_num[i];
1188		}
1189	}
1190
1191	plog->rptr = read_pos;
1192	*pos += read_bytes;
1193	return read_bytes;
1194}
1195
1196static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1197	.owner = THIS_MODULE,
1198	.read = amdgpu_debugfs_vcn_fwlog_read,
1199	.llseek = default_llseek
1200};
1201#endif
1202
1203void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1204                                   struct amdgpu_vcn_inst *vcn)
1205{
1206#if defined(CONFIG_DEBUG_FS)
1207	struct drm_minor *minor = adev_to_drm(adev)->primary;
1208	struct dentry *root = minor->debugfs_root;
1209	char name[32];
1210
1211	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1212	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
1213				 &amdgpu_debugfs_vcnfwlog_fops,
1214				 AMDGPU_VCNFW_LOG_SIZE);
1215#endif
1216}
1217
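    /*
     * Advertise the log buffer to the firmware: set the logging flag in
     * shared memory, hand over the log's GPU address and size, and
     * initialize the ring-buffer header (rptr = wptr = header size).
     */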
1218void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1219{
1220#if defined(CONFIG_DEBUG_FS)
1221	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1222	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1223	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1224	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1225	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1226                                                         + vcn->fw_shared.log_offset;
1227	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1228	fw_log->is_enabled = 1;
1229	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1230	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1231	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1232
1233	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1234	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1235	log_buf->rptr = log_buf->header_size;
1236	log_buf->wptr = log_buf->header_size;
1237	log_buf->wrapped = 0;
1238#endif
1239}
1240
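    /*
     * RAS poison interrupt entry point: forwards the IV entry to the
     * registered RAS block handler, or quietly succeeds when no RAS
     * interface is set up.
     */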
1241int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1242				struct amdgpu_irq_src *source,
1243				struct amdgpu_iv_entry *entry)
1244{
1245	struct ras_common_if *ras_if = adev->vcn.ras_if;
1246	struct ras_dispatch_if ih_data = {
1247		.entry = entry,
1248	};
1249
1250	if (!ras_if)
1251		return 0;
1252
1253	ih_data.head = *ras_if;
1254	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1255
1256	return 0;
1257}
1258
1259void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
1260{
1261	if (!adev->vcn.ras)
1262		return;
1263
1264	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
1265
1266	strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
1267	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1268	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1269	adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
1270
1271	/* If don't define special ras_late_init function, use default ras_late_init */
1272	if (!adev->vcn.ras->ras_block.ras_late_init)
1273		adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
1274}
amdgpu_vcn.c (v6.13.7)
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/dmi.h>
  30#include <linux/pci.h>
  31#include <linux/debugfs.h>
  32#include <drm/drm_drv.h>
  33
  34#include "amdgpu.h"
  35#include "amdgpu_pm.h"
  36#include "amdgpu_vcn.h"
  37#include "soc15d.h"
  38
  39/* Firmware Names */
  40#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
  41#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
  42#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
  43#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
  44#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
  45#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
  46#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
  47#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
  48#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
  49#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
  50#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
  51#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
  52#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  53#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
  54#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
  55#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
  56#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
  57#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
  58#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
  59#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
  60#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
  61#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
  62#define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
  63#define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
  64#define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
  65
  66MODULE_FIRMWARE(FIRMWARE_RAVEN);
  67MODULE_FIRMWARE(FIRMWARE_PICASSO);
  68MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  69MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  70MODULE_FIRMWARE(FIRMWARE_RENOIR);
  71MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  72MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  73MODULE_FIRMWARE(FIRMWARE_NAVI10);
  74MODULE_FIRMWARE(FIRMWARE_NAVI14);
  75MODULE_FIRMWARE(FIRMWARE_NAVI12);
  76MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  77MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  78MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  79MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  80MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  81MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  82MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  83MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  84MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  85MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
  86MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  87MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
  88MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
  89MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
  90MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
  91
  92static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  93
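    /*
     * New in later kernels: firmware is requested per instance here during
     * early init, with the file name derived from the decoded ucode
     * prefix; VCN 4.0.6 loads a "_1"-suffixed binary for its second
     * instance.
     */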
  94int amdgpu_vcn_early_init(struct amdgpu_device *adev)
  95{
  96	char ucode_prefix[25];
  97	int r, i;
  98
  99	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
 100	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 101		if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
 102			r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
 103		else
 104			r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
 105		if (r) {
 106			amdgpu_ucode_release(&adev->vcn.fw[i]);
 107			return r;
 108		}
 109	}
 110	return r;
 111}
 112
 113int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 114{
 115	unsigned long bo_size;
 116	const struct common_firmware_header *hdr;
 117	unsigned char fw_check;
 118	unsigned int fw_shared_size, log_offset;
 119	int i, r;
 120
 121	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 122	mutex_init(&adev->vcn.vcn_pg_lock);
 123	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
 124	atomic_set(&adev->vcn.total_submission_cnt, 0);
 125	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
 126		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 127
 128	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 129	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 130		adev->vcn.indirect_sram = true;
 131
 132	/*
 133	 * Some Steam Deck's BIOS versions are incompatible with the
 134	 * indirect SRAM mode, leading to amdgpu being unable to get
 135	 * properly probed (and even potentially crashing the kernel).
 136	 * Hence, check for these versions here - notice this is
 137	 * restricted to Vangogh (Deck's APU).
 138	 */
 139	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
 140		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
 141
 142		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
 143		     !strncmp("F7A0114", bios_ver, 7))) {
 144			adev->vcn.indirect_sram = false;
 145			dev_info(adev->dev,
 146				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
 147		}
 148	}
 149
 150	/* from vcn4 and above, only unified queue is used */
 151	adev->vcn.using_unified_queue =
 152		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
 153
 154	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 155	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 156
 157	/* Bit 20-23, it is encode major and non-zero for new naming convention.
 158	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
 159	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
 160	 * is zero in old naming convention, this field is always zero so far.
 161	 * These four bits are used to tell which naming convention is present.
 162	 */
 163	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 164	if (fw_check) {
 165		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 166
 167		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 168		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 169		enc_major = fw_check;
 170		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 171		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 172		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 173			enc_major, enc_minor, dec_ver, vep, fw_rev);
 174	} else {
 175		unsigned int version_major, version_minor, family_id;
 176
 177		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 178		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 179		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 180		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 181			version_major, version_minor, family_id);
 182	}
 183
 184	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 185	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 186		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 187
 188	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
 189		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
 190		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
 191	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
 192		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 193		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 194	} else {
 195		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 196		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 197	}
 198
 199	bo_size += fw_shared_size;
 200
 201	if (amdgpu_vcnfw_log)
 202		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 203
 204	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 205		if (adev->vcn.harvest_config & (1 << i))
 206			continue;
 207
 208		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 209					    AMDGPU_GEM_DOMAIN_VRAM |
 210					    AMDGPU_GEM_DOMAIN_GTT,
 211					    &adev->vcn.inst[i].vcpu_bo,
 212					    &adev->vcn.inst[i].gpu_addr,
 213					    &adev->vcn.inst[i].cpu_addr);
 214		if (r) {
 215			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 216			return r;
 217		}
 218
 219		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 220				bo_size - fw_shared_size;
 221		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 222				bo_size - fw_shared_size;
 223
 224		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 225
 226		if (amdgpu_vcnfw_log) {
 227			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 228			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 229			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 230		}
 231
 232		if (adev->vcn.indirect_sram) {
 233			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 234					AMDGPU_GEM_DOMAIN_VRAM |
 235					AMDGPU_GEM_DOMAIN_GTT,
 236					&adev->vcn.inst[i].dpg_sram_bo,
 237					&adev->vcn.inst[i].dpg_sram_gpu_addr,
 238					&adev->vcn.inst[i].dpg_sram_cpu_addr);
 239			if (r) {
 240				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 241				return r;
 242			}
 243		}
 244	}
 245
 246	return 0;
 247}
 248
 249int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 250{
 251	int i, j;
 252
 253	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 254		if (adev->vcn.harvest_config & (1 << j))
 255			continue;
 256
 257		amdgpu_bo_free_kernel(
 258			&adev->vcn.inst[j].dpg_sram_bo,
 259			&adev->vcn.inst[j].dpg_sram_gpu_addr,
 260			(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 261
 262		kvfree(adev->vcn.inst[j].saved_bo);
 263
 264		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 265					  &adev->vcn.inst[j].gpu_addr,
 266					  (void **)&adev->vcn.inst[j].cpu_addr);
 267
 268		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 269
 270		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 271			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 272
 273		amdgpu_ucode_release(&adev->vcn.fw[j]);
 274	}
 275
 276	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 277	mutex_destroy(&adev->vcn.vcn_pg_lock);
 278
 279	return 0;
 280}
 281
 282bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 283{
 284	bool ret = false;
 285	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 286
 287	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
 288		ret = true;
 289	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
 290		ret = true;
 291	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
 292		ret = true;
 293
 294	return ret;
 295}
 296
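    /*
     * Snapshot helper split out of amdgpu_vcn_suspend(); the suspend path
     * below skips it when an err_event_athub RAS interrupt has triggered,
     * since the VCPU buffer would be corrupt and amdgpu_vcn_resume()
     * restores from the firmware image and clears the buffer instead.
     */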
 297int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
 298{
 299	unsigned int size;
 300	void *ptr;
 301	int i, idx;
 302
 303	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 304		if (adev->vcn.harvest_config & (1 << i))
 305			continue;
 306		if (adev->vcn.inst[i].vcpu_bo == NULL)
 307			return 0;
 308
 309		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 310		ptr = adev->vcn.inst[i].cpu_addr;
 311
 312		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 313		if (!adev->vcn.inst[i].saved_bo)
 314			return -ENOMEM;
 315
 316		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 317			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 318			drm_dev_exit(idx);
 319		}
 320	}
 321
 322	return 0;
 323}
 324
 325int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 326{
 327	bool in_ras_intr = amdgpu_ras_intr_triggered();
 328
 329	cancel_delayed_work_sync(&adev->vcn.idle_work);
 330
 331	/* err_event_athub will corrupt VCPU buffer, so we need to
 332	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
 333	if (in_ras_intr)
 334		return 0;
 335
 336	return amdgpu_vcn_save_vcpu_bo(adev);
 337}
 338
 339int amdgpu_vcn_resume(struct amdgpu_device *adev)
 340{
 341	unsigned int size;
 342	void *ptr;
 343	int i, idx;
 344
 345	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 346		if (adev->vcn.harvest_config & (1 << i))
 347			continue;
 348		if (adev->vcn.inst[i].vcpu_bo == NULL)
 349			return -EINVAL;
 350
 351		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 352		ptr = adev->vcn.inst[i].cpu_addr;
 353
 354		if (adev->vcn.inst[i].saved_bo != NULL) {
 355			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 356				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 357				drm_dev_exit(idx);
 358			}
 359			kvfree(adev->vcn.inst[i].saved_bo);
 360			adev->vcn.inst[i].saved_bo = NULL;
 361		} else {
 362			const struct common_firmware_header *hdr;
 363			unsigned int offset;
 364
 365			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 366			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 367				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 368				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 369					memcpy_toio(adev->vcn.inst[i].cpu_addr,
 370						    adev->vcn.fw[i]->data + offset,
 371						    le32_to_cpu(hdr->ucode_size_bytes));
 372					drm_dev_exit(idx);
 373				}
 374				size -= le32_to_cpu(hdr->ucode_size_bytes);
 375				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 376			}
 377			memset_io(ptr, 0, size);
 378		}
 379	}
 380	return 0;
 381}
 382
 383static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 384{
 385	struct amdgpu_device *adev =
 386		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 387	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 388	unsigned int i, j;
 389	int r = 0;
 390
 391	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 392		if (adev->vcn.harvest_config & (1 << j))
 393			continue;
 394
 395		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 396			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 397
 398		/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
 399		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 400		    !adev->vcn.using_unified_queue) {
 401			struct dpg_pause_state new_state;
 402
 403			if (fence[j] ||
 404				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 405				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 406			else
 407				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 408
 409			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 410		}
 411
 412		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 413		fences += fence[j];
 414	}
 415
 416	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 417		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 418		       AMD_PG_STATE_GATE);
 419		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 420				false);
 421		if (r)
 422			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 423	} else {
 424		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 425	}
 426}
 427
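/**
 * amdgpu_vcn_ring_begin_use - power up VCN ahead of a submission
 * @ring: the VCN ring about to be used
 *
 * Bump the submission count and cancel the idle work, re-enabling
 * the video power profile if the idle work had already run. Ungate
 * the VCN block and, on DPG parts without a unified queue, set the
 * pause state to match the pending encode work.
 */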
 428void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 429{
 430	struct amdgpu_device *adev = ring->adev;
 431	int r = 0;
 432
 433	atomic_inc(&adev->vcn.total_submission_cnt);
 434
 435	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 436		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 437				true);
 438		if (r)
 439			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 440	}
 441
 442	mutex_lock(&adev->vcn.vcn_pg_lock);
 443	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 444	       AMD_PG_STATE_UNGATE);
 445
  446	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
 447	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 448	    !adev->vcn.using_unified_queue) {
 449		struct dpg_pause_state new_state;
 450
 451		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 452			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 453			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 454		} else {
 455			unsigned int fences = 0;
 456			unsigned int i;
 457
 458			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 459				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 460
 461			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 462				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 463			else
 464				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 465		}
 466
 467		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 468	}
 469	mutex_unlock(&adev->vcn.vcn_pg_lock);
 470}
 471
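/**
 * amdgpu_vcn_ring_end_use - mark the end of a ring submission
 * @ring: the VCN ring that was used
 *
 * Drop the counters taken in amdgpu_vcn_ring_begin_use() and
 * schedule the idle work that will eventually gate the block again.
 */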
 472void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 473{
 474	struct amdgpu_device *adev = ring->adev;
 475
  476	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
 477	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 478	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
 479	    !adev->vcn.using_unified_queue)
 480		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 481
 482	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 483
 484	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 485}
 486
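/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read ring test
 * @ring: the decode ring to test
 *
 * Seed the scratch9 register, submit a packet that overwrites it
 * and poll until the new value reads back. Skipped under SRIOV,
 * where direct register access is not available.
 *
 * Returns 0 on success, -ETIMEDOUT if the write never lands.
 */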
 487int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 488{
 489	struct amdgpu_device *adev = ring->adev;
 490	uint32_t tmp = 0;
 491	unsigned int i;
 492	int r;
 493
 494	/* VCN in SRIOV does not support direct register read/write */
 495	if (amdgpu_sriov_vf(adev))
 496		return 0;
 497
 498	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 499	r = amdgpu_ring_alloc(ring, 3);
 500	if (r)
 501		return r;
 502	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 503	amdgpu_ring_write(ring, 0xDEADBEEF);
 504	amdgpu_ring_commit(ring);
 505	for (i = 0; i < adev->usec_timeout; i++) {
 506		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 507		if (tmp == 0xDEADBEEF)
 508			break;
 509		udelay(1);
 510	}
 511
 512	if (i >= adev->usec_timeout)
 513		r = -ETIMEDOUT;
 514
 515	return r;
 516}
 517
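/**
 * amdgpu_vcn_dec_sw_ring_test_ring - software decode ring test
 * @ring: the software decode ring to test
 *
 * Submit an END command and wait for the read pointer to advance,
 * which shows the firmware is consuming the ring.
 *
 * Returns 0 on success, -ETIMEDOUT if the rptr never moves.
 */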
 518int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 519{
 520	struct amdgpu_device *adev = ring->adev;
 521	uint32_t rptr;
 522	unsigned int i;
 523	int r;
 524
 525	if (amdgpu_sriov_vf(adev))
 526		return 0;
 527
 528	r = amdgpu_ring_alloc(ring, 16);
 529	if (r)
 530		return r;
 531
 532	rptr = amdgpu_ring_get_rptr(ring);
 533
 534	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 535	amdgpu_ring_commit(ring);
 536
 537	for (i = 0; i < adev->usec_timeout; i++) {
 538		if (amdgpu_ring_get_rptr(ring) != rptr)
 539			break;
 540		udelay(1);
 541	}
 542
 543	if (i >= adev->usec_timeout)
 544		r = -ETIMEDOUT;
 545
 546	return r;
 547}
 548
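/*
 * Build a small direct-submit IB that hands the message buffer in
 * @ib_msg to the decoder, free the message buffer, and optionally
 * return a reference to the resulting fence.
 */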
 549static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 550				   struct amdgpu_ib *ib_msg,
 551				   struct dma_fence **fence)
 552{
 553	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 554	struct amdgpu_device *adev = ring->adev;
 555	struct dma_fence *f = NULL;
 556	struct amdgpu_job *job;
 557	struct amdgpu_ib *ib;
 558	int i, r;
 559
 560	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 561				     64, AMDGPU_IB_POOL_DIRECT,
 562				     &job);
 563	if (r)
 564		goto err;
 565
 566	ib = &job->ibs[0];
 567	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 568	ib->ptr[1] = addr;
 569	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 570	ib->ptr[3] = addr >> 32;
 571	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 572	ib->ptr[5] = 0;
 573	for (i = 6; i < 16; i += 2) {
 574		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 575		ib->ptr[i+1] = 0;
 576	}
 577	ib->length_dw = 16;
 578
 579	r = amdgpu_job_submit_direct(job, ring, &f);
 580	if (r)
 581		goto err_free;
 582
 583	amdgpu_ib_free(adev, ib_msg, f);
 584
 585	if (fence)
 586		*fence = dma_fence_get(f);
 587	dma_fence_put(f);
 588
 589	return 0;
 590
 591err_free:
 592	amdgpu_job_free(job);
 593err:
 594	amdgpu_ib_free(adev, ib_msg, f);
 595	return r;
 596}
 597
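/*
 * Fill a page-aligned buffer with the canned "create session"
 * decoder message for @handle, used by the decode IB tests.
 */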
 598static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 599		struct amdgpu_ib *ib)
 600{
 601	struct amdgpu_device *adev = ring->adev;
 602	uint32_t *msg;
 603	int r, i;
 604
 605	memset(ib, 0, sizeof(*ib));
 606	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 607			AMDGPU_IB_POOL_DIRECT,
 608			ib);
 609	if (r)
 610		return r;
 611
 612	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 613	msg[0] = cpu_to_le32(0x00000028);
 614	msg[1] = cpu_to_le32(0x00000038);
 615	msg[2] = cpu_to_le32(0x00000001);
 616	msg[3] = cpu_to_le32(0x00000000);
 617	msg[4] = cpu_to_le32(handle);
 618	msg[5] = cpu_to_le32(0x00000000);
 619	msg[6] = cpu_to_le32(0x00000001);
 620	msg[7] = cpu_to_le32(0x00000028);
 621	msg[8] = cpu_to_le32(0x00000010);
 622	msg[9] = cpu_to_le32(0x00000000);
 623	msg[10] = cpu_to_le32(0x00000007);
 624	msg[11] = cpu_to_le32(0x00000000);
 625	msg[12] = cpu_to_le32(0x00000780);
 626	msg[13] = cpu_to_le32(0x00000440);
 627	for (i = 14; i < 1024; ++i)
 628		msg[i] = cpu_to_le32(0x0);
 629
 630	return 0;
 631}
 632
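/*
 * Fill a page-aligned buffer with the canned "destroy session"
 * decoder message matching amdgpu_vcn_dec_get_create_msg().
 */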
 633static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 634					  struct amdgpu_ib *ib)
 635{
 636	struct amdgpu_device *adev = ring->adev;
 637	uint32_t *msg;
 638	int r, i;
 639
 640	memset(ib, 0, sizeof(*ib));
 641	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 642			AMDGPU_IB_POOL_DIRECT,
 643			ib);
 644	if (r)
 645		return r;
 646
 647	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 648	msg[0] = cpu_to_le32(0x00000028);
 649	msg[1] = cpu_to_le32(0x00000018);
 650	msg[2] = cpu_to_le32(0x00000000);
 651	msg[3] = cpu_to_le32(0x00000002);
 652	msg[4] = cpu_to_le32(handle);
 653	msg[5] = cpu_to_le32(0x00000000);
 654	for (i = 6; i < 1024; ++i)
 655		msg[i] = cpu_to_le32(0x0);
 656
 657	return 0;
 658}
 659
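/**
 * amdgpu_vcn_dec_ring_test_ib - IB test for the decode ring
 * @ring: the decode ring to test
 * @timeout: how long to wait for the final fence, in jiffies
 *
 * Send a create message followed by a destroy message and wait for
 * the destroy to signal.
 *
 * Returns 0 on success, -ETIMEDOUT or another negative error code
 * on failure.
 */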
 660int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 661{
 662	struct dma_fence *fence = NULL;
 663	struct amdgpu_ib ib;
 664	long r;
 665
 666	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 667	if (r)
 668		goto error;
 669
 670	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 671	if (r)
 672		goto error;
 673	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 674	if (r)
 675		goto error;
 676
 677	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 678	if (r)
 679		goto error;
 680
 681	r = dma_fence_wait_timeout(fence, false, timeout);
 682	if (r == 0)
 683		r = -ETIMEDOUT;
 684	else if (r > 0)
 685		r = 0;
 686
 687	dma_fence_put(fence);
 688error:
 689	return r;
 690}
 691
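/*
 * Emit the unified-queue IB header (checksum and engine-info
 * packets) and return a pointer to the checksum slot so it can be
 * filled in once the payload is known.
 */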
 692static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 693						uint32_t ib_pack_in_dw, bool enc)
 694{
 695	uint32_t *ib_checksum;
 696
 697	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 698	ib->ptr[ib->length_dw++] = 0x30000002;
 699	ib_checksum = &ib->ptr[ib->length_dw++];
 700	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 701
 702	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 703	ib->ptr[ib->length_dw++] = 0x30000001;
 704	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 705	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 706
 707	return ib_checksum;
 708}
 709
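/*
 * Sum the @ib_pack_in_dw dwords that start two slots past the
 * checksum (i.e. from the engine-info packet onward) and store the
 * result in the slot reserved by amdgpu_vcn_unified_ring_ib_header().
 */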
 710static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 711						uint32_t ib_pack_in_dw)
 712{
 713	uint32_t i;
 714	uint32_t checksum = 0;
 715
 716	for (i = 0; i < ib_pack_in_dw; i++)
 717		checksum += *(*ib_checksum + 2 + i);
 718
 719	**ib_checksum = checksum;
 720}
 721
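/*
 * Software-ring variant of amdgpu_vcn_dec_send_msg(): wrap the
 * message buffer in a decode-buffer structure, add the unified
 * queue header and checksum when a single queue is in use, and
 * submit the IB directly.
 */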
 722static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 723				      struct amdgpu_ib *ib_msg,
 724				      struct dma_fence **fence)
 725{
 726	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 727	unsigned int ib_size_dw = 64;
 728	struct amdgpu_device *adev = ring->adev;
 729	struct dma_fence *f = NULL;
 730	struct amdgpu_job *job;
 731	struct amdgpu_ib *ib;
 732	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 733	uint32_t *ib_checksum;
 734	uint32_t ib_pack_in_dw;
 735	int i, r;
 736
 737	if (adev->vcn.using_unified_queue)
 738		ib_size_dw += 8;
 739
 740	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 741				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 742				     &job);
 743	if (r)
 744		goto err;
 745
 746	ib = &job->ibs[0];
 747	ib->length_dw = 0;
 748
 749	/* single queue headers */
 750	if (adev->vcn.using_unified_queue) {
 751		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 752						+ 4 + 2; /* engine info + decoding ib in dw */
 753		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 754	}
 755
 756	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 757	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 758	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 759	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 760	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 761
 762	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 763	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 764	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 765
 766	for (i = ib->length_dw; i < ib_size_dw; ++i)
 767		ib->ptr[i] = 0x0;
 768
 769	if (adev->vcn.using_unified_queue)
 770		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 771
 772	r = amdgpu_job_submit_direct(job, ring, &f);
 773	if (r)
 774		goto err_free;
 775
 776	amdgpu_ib_free(adev, ib_msg, f);
 777
 778	if (fence)
 779		*fence = dma_fence_get(f);
 780	dma_fence_put(f);
 781
 782	return 0;
 783
 784err_free:
 785	amdgpu_job_free(job);
 786err:
 787	amdgpu_ib_free(adev, ib_msg, f);
 788	return r;
 789}
 790
 791int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 792{
 793	struct dma_fence *fence = NULL;
 794	struct amdgpu_ib ib;
 795	long r;
 796
 797	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 798	if (r)
 799		goto error;
 800
 801	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 802	if (r)
 803		goto error;
 804	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 805	if (r)
 806		goto error;
 807
 808	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 809	if (r)
 810		goto error;
 811
 812	r = dma_fence_wait_timeout(fence, false, timeout);
 813	if (r == 0)
 814		r = -ETIMEDOUT;
 815	else if (r > 0)
 816		r = 0;
 817
 818	dma_fence_put(fence);
 819error:
 820	return r;
 821}
 822
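/**
 * amdgpu_vcn_enc_ring_test_ring - encode ring test
 * @ring: the encode ring to test
 *
 * Submit an END command and wait for the read pointer to advance.
 * Skipped under SRIOV.
 *
 * Returns 0 on success, -ETIMEDOUT if the rptr never moves.
 */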
 823int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 824{
 825	struct amdgpu_device *adev = ring->adev;
 826	uint32_t rptr;
 827	unsigned int i;
 828	int r;
 829
 830	if (amdgpu_sriov_vf(adev))
 831		return 0;
 832
 833	r = amdgpu_ring_alloc(ring, 16);
 834	if (r)
 835		return r;
 836
 837	rptr = amdgpu_ring_get_rptr(ring);
 838
 839	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 840	amdgpu_ring_commit(ring);
 841
 842	for (i = 0; i < adev->usec_timeout; i++) {
 843		if (amdgpu_ring_get_rptr(ring) != rptr)
 844			break;
 845		udelay(1);
 846	}
 847
 848	if (i >= adev->usec_timeout)
 849		r = -ETIMEDOUT;
 850
 851	return r;
 852}
 853
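/*
 * Build and directly submit the canned "open session" encoder
 * message (session info, task info and an initialize op), wrapped
 * in a unified-queue header when needed, and optionally return a
 * reference to the resulting fence.
 */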
 854static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 855					 struct amdgpu_ib *ib_msg,
 856					 struct dma_fence **fence)
 857{
 858	unsigned int ib_size_dw = 16;
 859	struct amdgpu_device *adev = ring->adev;
 860	struct amdgpu_job *job;
 861	struct amdgpu_ib *ib;
 862	struct dma_fence *f = NULL;
 863	uint32_t *ib_checksum = NULL;
 864	uint64_t addr;
 865	int i, r;
 866
 867	if (adev->vcn.using_unified_queue)
 868		ib_size_dw += 8;
 869
 870	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 871				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 872				     &job);
 873	if (r)
 874		return r;
 875
 876	ib = &job->ibs[0];
 877	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 878
 879	ib->length_dw = 0;
 880
 881	if (adev->vcn.using_unified_queue)
 882		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 883
 884	ib->ptr[ib->length_dw++] = 0x00000018;
 885	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 886	ib->ptr[ib->length_dw++] = handle;
 887	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 888	ib->ptr[ib->length_dw++] = addr;
 889	ib->ptr[ib->length_dw++] = 0x00000000;
 890
 891	ib->ptr[ib->length_dw++] = 0x00000014;
 892	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 893	ib->ptr[ib->length_dw++] = 0x0000001c;
 894	ib->ptr[ib->length_dw++] = 0x00000000;
 895	ib->ptr[ib->length_dw++] = 0x00000000;
 896
 897	ib->ptr[ib->length_dw++] = 0x00000008;
 898	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 899
 900	for (i = ib->length_dw; i < ib_size_dw; ++i)
 901		ib->ptr[i] = 0x0;
 902
 903	if (adev->vcn.using_unified_queue)
 904		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 905
 906	r = amdgpu_job_submit_direct(job, ring, &f);
 907	if (r)
 908		goto err;
 909
 910	if (fence)
 911		*fence = dma_fence_get(f);
 912	dma_fence_put(f);
 913
 914	return 0;
 915
 916err:
 917	amdgpu_job_free(job);
 918	return r;
 919}
 920
 921static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 922					  struct amdgpu_ib *ib_msg,
 923					  struct dma_fence **fence)
 924{
 925	unsigned int ib_size_dw = 16;
 926	struct amdgpu_device *adev = ring->adev;
 927	struct amdgpu_job *job;
 928	struct amdgpu_ib *ib;
 929	struct dma_fence *f = NULL;
 930	uint32_t *ib_checksum = NULL;
 931	uint64_t addr;
 932	int i, r;
 933
 934	if (adev->vcn.using_unified_queue)
 935		ib_size_dw += 8;
 936
 937	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 938				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 939				     &job);
 940	if (r)
 941		return r;
 942
 943	ib = &job->ibs[0];
 944	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 945
 946	ib->length_dw = 0;
 947
 948	if (adev->vcn.using_unified_queue)
 949		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 950
 951	ib->ptr[ib->length_dw++] = 0x00000018;
 952	ib->ptr[ib->length_dw++] = 0x00000001;
 953	ib->ptr[ib->length_dw++] = handle;
 954	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 955	ib->ptr[ib->length_dw++] = addr;
 956	ib->ptr[ib->length_dw++] = 0x00000000;
 957
 958	ib->ptr[ib->length_dw++] = 0x00000014;
 959	ib->ptr[ib->length_dw++] = 0x00000002;
 960	ib->ptr[ib->length_dw++] = 0x0000001c;
 961	ib->ptr[ib->length_dw++] = 0x00000000;
 962	ib->ptr[ib->length_dw++] = 0x00000000;
 963
 964	ib->ptr[ib->length_dw++] = 0x00000008;
 965	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 966
 967	for (i = ib->length_dw; i < ib_size_dw; ++i)
 968		ib->ptr[i] = 0x0;
 969
 970	if (adev->vcn.using_unified_queue)
 971		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 972
 973	r = amdgpu_job_submit_direct(job, ring, &f);
 974	if (r)
 975		goto err;
 976
 977	if (fence)
 978		*fence = dma_fence_get(f);
 979	dma_fence_put(f);
 980
 981	return 0;
 982
 983err:
 984	amdgpu_job_free(job);
 985	return r;
 986}
 987
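/**
 * amdgpu_vcn_enc_ring_test_ib - IB test for the encode ring
 * @ring: the encode ring to test
 * @timeout: how long to wait for the final fence, in jiffies
 *
 * Allocate a session buffer, open and close an encoder session, and
 * wait for the close to signal.
 *
 * Returns 0 on success, negative error code on failure.
 */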
 988int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 989{
 990	struct amdgpu_device *adev = ring->adev;
 991	struct dma_fence *fence = NULL;
 992	struct amdgpu_ib ib;
 993	long r;
 994
 995	memset(&ib, 0, sizeof(ib));
 996	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
 997			AMDGPU_IB_POOL_DIRECT,
 998			&ib);
 999	if (r)
1000		return r;
1001
1002	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1003	if (r)
1004		goto error;
1005
1006	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1007	if (r)
1008		goto error;
1009
1010	r = dma_fence_wait_timeout(fence, false, timeout);
1011	if (r == 0)
1012		r = -ETIMEDOUT;
1013	else if (r > 0)
1014		r = 0;
1015
1016error:
1017	amdgpu_ib_free(adev, &ib, fence);
1018	dma_fence_put(fence);
1019
1020	return r;
1021}
1022
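/**
 * amdgpu_vcn_unified_ring_test_ib - IB test for a unified ring
 * @ring: the unified ring to test
 * @timeout: how long to wait for the final fences, in jiffies
 *
 * Run the encode IB test (skipped on VCN 4.0.3) followed by the
 * software decode IB test on the same ring.
 */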
1023int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1024{
1025	struct amdgpu_device *adev = ring->adev;
1026	long r;
1027
1028	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
1029		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1030		if (r)
1031			goto error;
1032	}
1033
 1034	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1035
1036error:
1037	return r;
1038}
1039
1040enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1041{
1042	switch (ring) {
1043	case 0:
1044		return AMDGPU_RING_PRIO_0;
1045	case 1:
1046		return AMDGPU_RING_PRIO_1;
1047	case 2:
1048		return AMDGPU_RING_PRIO_2;
1049	default:
1050		return AMDGPU_RING_PRIO_0;
1051	}
1052}
1053
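/**
 * amdgpu_vcn_setup_ucode - register VCN firmware for PSP loading
 * @adev: amdgpu_device pointer
 *
 * When firmware is loaded through the PSP, add each instance's VCN
 * ucode to the firmware table and account for its size. At most two
 * FW instances are supported, and on VCN 4.0.3 only the first
 * instance's image is registered.
 */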
1054void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1055{
1056	int i;
1057	unsigned int idx;
1058
1059	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1060		const struct common_firmware_header *hdr;
1061
1062		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1063			if (adev->vcn.harvest_config & (1 << i))
1064				continue;
1065
1066			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 1067			/* currently only 2 FW instances are supported */
1068			if (i >= 2) {
 1069				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1070				break;
1071			}
1072			idx = AMDGPU_UCODE_ID_VCN + i;
1073			adev->firmware.ucode[idx].ucode_id = idx;
1074			adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
1075			adev->firmware.fw_size +=
1076				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1077
1078			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1079			    IP_VERSION(4, 0, 3))
1080				break;
1081		}
1082	}
1083}
1084
 1085/*
 1086 * debugfs interface for reading the vcn firmware log buffer.
 1087 */
1088#if defined(CONFIG_DEBUG_FS)
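/*
 * Copy new log data to userspace, following the ring buffer's read
 * and write pointers and handling wrap-around in at most two chunks.
 */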
1089static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1090					     size_t size, loff_t *pos)
1091{
1092	struct amdgpu_vcn_inst *vcn;
1093	void *log_buf;
1094	volatile struct amdgpu_vcn_fwlog *plog;
1095	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1096	unsigned int read_num[2] = {0};
1097
1098	vcn = file_inode(f)->i_private;
1099	if (!vcn)
1100		return -ENODEV;
1101
1102	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1103		return -EFAULT;
1104
1105	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1106
1107	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1108	read_pos = plog->rptr;
1109	write_pos = plog->wptr;
1110
1111	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1112		return -EFAULT;
1113
1114	if (!size || (read_pos == write_pos))
1115		return 0;
1116
1117	if (write_pos > read_pos) {
1118		available = write_pos - read_pos;
1119		read_num[0] = min_t(size_t, size, available);
1120	} else {
1121		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1122		available = read_num[0] + write_pos - plog->header_size;
1123		if (size > available)
1124			read_num[1] = write_pos - plog->header_size;
1125		else if (size > read_num[0])
1126			read_num[1] = size - read_num[0];
1127		else
1128			read_num[0] = size;
1129	}
1130
1131	for (i = 0; i < 2; i++) {
1132		if (read_num[i]) {
1133			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1134				read_pos = plog->header_size;
1135			if (read_num[i] == copy_to_user((buf + read_bytes),
1136							(log_buf + read_pos), read_num[i]))
1137				return -EFAULT;
1138
1139			read_bytes += read_num[i];
1140			read_pos += read_num[i];
1141		}
1142	}
1143
1144	plog->rptr = read_pos;
1145	*pos += read_bytes;
1146	return read_bytes;
1147}
1148
1149static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1150	.owner = THIS_MODULE,
1151	.read = amdgpu_debugfs_vcn_fwlog_read,
1152	.llseek = default_llseek
1153};
1154#endif
1155
1156void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1157				   struct amdgpu_vcn_inst *vcn)
1158{
1159#if defined(CONFIG_DEBUG_FS)
1160	struct drm_minor *minor = adev_to_drm(adev)->primary;
1161	struct dentry *root = minor->debugfs_root;
1162	char name[32];
1163
1164	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1165	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1166				 &amdgpu_debugfs_vcnfwlog_fops,
1167				 AMDGPU_VCNFW_LOG_SIZE);
1168#endif
1169}
1170
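/**
 * amdgpu_vcn_fwlog_init - enable firmware logging for a VCN instance
 * @vcn: the VCN instance
 *
 * Place the log ring buffer at the tail of the shared memory region,
 * advertise its GPU address and size to the firmware through the
 * shared logging structure, and initialize the ring header.
 */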
1171void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1172{
1173#if defined(CONFIG_DEBUG_FS)
1174	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1175	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1176	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1177	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1178	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1179							 + vcn->fw_shared.log_offset;
1180	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1181	fw_log->is_enabled = 1;
1182	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1183	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1184	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1185
1186	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1187	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1188	log_buf->rptr = log_buf->header_size;
1189	log_buf->wptr = log_buf->header_size;
1190	log_buf->wrapped = 0;
1191#endif
1192}
1193
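/**
 * amdgpu_vcn_process_poison_irq - handle a VCN RAS poison interrupt
 * @adev: amdgpu_device pointer
 * @source: the interrupt source
 * @entry: the interrupt vector entry
 *
 * On bare metal, dispatch the event to the RAS framework; under
 * SRIOV, forward it to the host's poison handler when one exists.
 */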
1194int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1195				struct amdgpu_irq_src *source,
1196				struct amdgpu_iv_entry *entry)
1197{
1198	struct ras_common_if *ras_if = adev->vcn.ras_if;
1199	struct ras_dispatch_if ih_data = {
1200		.entry = entry,
1201	};
1202
1203	if (!ras_if)
1204		return 0;
1205
1206	if (!amdgpu_sriov_vf(adev)) {
1207		ih_data.head = *ras_if;
1208		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1209	} else {
1210		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1211			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
1212		else
1213			dev_warn(adev->dev,
1214				"No ras_poison_handler interface in SRIOV for VCN!\n");
1215	}
1216
1217	return 0;
1218}
1219
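/*
 * Common RAS late init for VCN: run the generic block late init and
 * then enable the per-instance poison interrupts where RAS is
 * supported.
 */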
1220int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1221{
1222	int r, i;
1223
1224	r = amdgpu_ras_block_late_init(adev, ras_block);
1225	if (r)
1226		return r;
1227
1228	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1229		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1230			if (adev->vcn.harvest_config & (1 << i) ||
1231			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1232				continue;
1233
1234			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1235			if (r)
1236				goto late_fini;
1237		}
1238	}
1239	return 0;
1240
1241late_fini:
1242	amdgpu_ras_block_late_fini(adev, ras_block);
1243	return r;
1244}
1245
1246int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1247{
1248	int err;
1249	struct amdgpu_vcn_ras *ras;
1250
1251	if (!adev->vcn.ras)
1252		return 0;
1253
1254	ras = adev->vcn.ras;
1255	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1256	if (err) {
1257		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1258		return err;
1259	}
1260
1261	strcpy(ras->ras_block.ras_comm.name, "vcn");
1262	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1263	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1264	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1265
1266	if (!ras->ras_block.ras_late_init)
1267		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1268
1269	return 0;
1270}
1271
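/**
 * amdgpu_vcn_psp_update_sram - load the DPG SRAM image through PSP
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @ucode_id: ucode ID to use, or 0 to select the instance's VCN RAM ID
 *
 * Hand the indirect-register (DPG SRAM) image accumulated for this
 * instance to the PSP for loading.
 */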
1272int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1273			       enum AMDGPU_UCODE_ID ucode_id)
1274{
1275	struct amdgpu_firmware_info ucode = {
1276		.ucode_id = (ucode_id ? ucode_id :
1277			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1278					AMDGPU_UCODE_ID_VCN0_RAM)),
1279		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1280		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1281			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1282	};
1283
1284	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1285}
1286
1287static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
1288						struct device_attribute *attr,
1289						char *buf)
1290{
1291	struct drm_device *ddev = dev_get_drvdata(dev);
1292	struct amdgpu_device *adev = drm_to_adev(ddev);
1293
1294	if (!adev)
1295		return -ENODEV;
1296
1297	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
1298}
1299
1300static DEVICE_ATTR(vcn_reset_mask, 0444,
1301		   amdgpu_get_vcn_reset_mask, NULL);
1302
1303int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
1304{
1305	int r = 0;
1306
1307	if (adev->vcn.num_vcn_inst) {
1308		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
1309		if (r)
1310			return r;
1311	}
1312
1313	return r;
1314}
1315
1316void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1317{
1318	if (adev->dev->kobj.sd) {
1319		if (adev->vcn.num_vcn_inst)
1320			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
1321	}
1322}