v6.2: drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/pci.h>
  30#include <linux/debugfs.h>
  31#include <drm/drm_drv.h>
  32
  33#include "amdgpu.h"
  34#include "amdgpu_pm.h"
  35#include "amdgpu_vcn.h"
  36#include "soc15d.h"
  37
  38/* Firmware Names */
  39#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
  40#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
  41#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
  42#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
  43#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
  44#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
  45#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
  46#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
  47#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
  48#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
  49#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
  50#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
  51#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  52#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
  53#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
  54#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
  55#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
  56#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
  57#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
   58#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
  59
  60MODULE_FIRMWARE(FIRMWARE_RAVEN);
  61MODULE_FIRMWARE(FIRMWARE_PICASSO);
  62MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  63MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  64MODULE_FIRMWARE(FIRMWARE_RENOIR);
  65MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  66MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  67MODULE_FIRMWARE(FIRMWARE_NAVI10);
  68MODULE_FIRMWARE(FIRMWARE_NAVI14);
  69MODULE_FIRMWARE(FIRMWARE_NAVI12);
  70MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  71MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  72MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  73MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  74MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  75MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  76MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  77MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  78MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  79MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  80
  81static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  82
  83int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
  84{
  85	unsigned long bo_size;
  86	const char *fw_name;
  87	const struct common_firmware_header *hdr;
  88	unsigned char fw_check;
  89	unsigned int fw_shared_size, log_offset;
  90	int i, r;
  91
  92	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
  93	mutex_init(&adev->vcn.vcn_pg_lock);
  94	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
  95	atomic_set(&adev->vcn.total_submission_cnt, 0);
  96	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
  97		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
  98
  99	switch (adev->ip_versions[UVD_HWIP][0]) {
 100	case IP_VERSION(1, 0, 0):
 101	case IP_VERSION(1, 0, 1):
 102		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 103			fw_name = FIRMWARE_RAVEN2;
 104		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
 105			fw_name = FIRMWARE_PICASSO;
 106		else
 107			fw_name = FIRMWARE_RAVEN;
 108		break;
 109	case IP_VERSION(2, 5, 0):
 110		fw_name = FIRMWARE_ARCTURUS;
 111		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 112		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 113			adev->vcn.indirect_sram = true;
 114		break;
 115	case IP_VERSION(2, 2, 0):
 116		if (adev->apu_flags & AMD_APU_IS_RENOIR)
 117			fw_name = FIRMWARE_RENOIR;
 118		else
 119			fw_name = FIRMWARE_GREEN_SARDINE;
 120
 121		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 122		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 123			adev->vcn.indirect_sram = true;
 124		break;
 125	case IP_VERSION(2, 6, 0):
 126		fw_name = FIRMWARE_ALDEBARAN;
 127		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 128		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 129			adev->vcn.indirect_sram = true;
 130		break;
 131	case IP_VERSION(2, 0, 0):
 132		fw_name = FIRMWARE_NAVI10;
 133		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 134		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 135			adev->vcn.indirect_sram = true;
 136		break;
 137	case IP_VERSION(2, 0, 2):
 138		if (adev->asic_type == CHIP_NAVI12)
 139			fw_name = FIRMWARE_NAVI12;
 140		else
 141			fw_name = FIRMWARE_NAVI14;
 142		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 143		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 144			adev->vcn.indirect_sram = true;
 145		break;
 146	case IP_VERSION(3, 0, 0):
 147	case IP_VERSION(3, 0, 64):
 148	case IP_VERSION(3, 0, 192):
 149		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 150			fw_name = FIRMWARE_SIENNA_CICHLID;
 151		else
 152			fw_name = FIRMWARE_NAVY_FLOUNDER;
 153		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 154		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 155			adev->vcn.indirect_sram = true;
 156		break;
 157	case IP_VERSION(3, 0, 2):
 158		fw_name = FIRMWARE_VANGOGH;
 159		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 160		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 161			adev->vcn.indirect_sram = true;
 162		break;
 163	case IP_VERSION(3, 0, 16):
 164		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
 165		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 166		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 167			adev->vcn.indirect_sram = true;
 168		break;
 169	case IP_VERSION(3, 0, 33):
 170		fw_name = FIRMWARE_BEIGE_GOBY;
 171		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 172		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 173			adev->vcn.indirect_sram = true;
 174		break;
 175	case IP_VERSION(3, 1, 1):
 176		fw_name = FIRMWARE_YELLOW_CARP;
 177		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 178		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 179			adev->vcn.indirect_sram = true;
 180		break;
 181	case IP_VERSION(3, 1, 2):
 182		fw_name = FIRMWARE_VCN_3_1_2;
 183		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 184		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 185			adev->vcn.indirect_sram = true;
 186		break;
 187	case IP_VERSION(4, 0, 0):
 188		fw_name = FIRMWARE_VCN4_0_0;
 189		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 190			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 191			adev->vcn.indirect_sram = true;
 192		break;
 193	case IP_VERSION(4, 0, 2):
 194		fw_name = FIRMWARE_VCN4_0_2;
 195		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 196			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 197			adev->vcn.indirect_sram = true;
 198		break;
 199	case IP_VERSION(4, 0, 4):
 200		fw_name = FIRMWARE_VCN4_0_4;
 201		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 202			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 203			adev->vcn.indirect_sram = true;
 204		break;
 205	default:
 206		return -EINVAL;
 207	}
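	/* indirect_sram selects the DPG programming path that stages register
	 * writes in a small buffer (the dpg_sram_bo allocated below) rather
	 * than issuing them directly; it is enabled only when the PSP loads
	 * the firmware and the chip advertises AMD_PG_SUPPORT_VCN_DPG, which
	 * is why every case above repeats the same load_type/pg_flags check.
	 */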
 208
 209	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
 210	if (r) {
 211		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
 212			fw_name);
 213		return r;
 214	}
 215
 216	r = amdgpu_ucode_validate(adev->vcn.fw);
 217	if (r) {
 218		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
 219			fw_name);
 220		release_firmware(adev->vcn.fw);
 221		adev->vcn.fw = NULL;
 222		return r;
 223	}
 224
 225	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 226	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 227
  228	/* Bits 20-23 hold the encode major version and are non-zero under the new
  229	 * naming convention. Under the old convention this field is part of version
  230	 * minor and DRM_DISABLED_FLAG; since the latest old-style version minor is
  231	 * 0x5B and DRM_DISABLED_FLAG is zero, the field is always zero there, so
  232	 * these four bits tell which naming convention is in use.
  233	 */
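	/* Worked example with a hypothetical value: ucode_version 0x0110002a
	 * decodes under the new convention as VEP 0 (bits 28-31), DEC 1
	 * (bits 24-27), ENC 1.0 (bits 20-23 and 12-19) and revision 0x2a
	 * (bits 0-11); the non-zero bits 20-23 are what selects this branch.
	 */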
 234	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 235	if (fw_check) {
 236		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 237
 238		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 239		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 240		enc_major = fw_check;
 241		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 242		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 243		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 244			enc_major, enc_minor, dec_ver, vep, fw_rev);
 245	} else {
 246		unsigned int version_major, version_minor, family_id;
 247
 248		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 249		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 250		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 251		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 252			version_major, version_minor, family_id);
 253	}
 254
 255	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 256	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 257		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 258
  259	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
 260		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 261		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 262	} else {
 263		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 264		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 265	}
 266
 267	bo_size += fw_shared_size;
 268
 269	if (amdgpu_vcnfw_log)
 270		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 271
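	/* Resulting per-instance VCPU BO layout, as computed above (a sketch,
	 * not an authoritative map): [ucode copy, non-PSP loads only]
	 * [stack + context] [fw_shared] [fw log, if amdgpu_vcnfw_log]; the
	 * shared region is anchored at the end of the BO and then shifted
	 * down by AMDGPU_VCNFW_LOG_SIZE so the log occupies the final bytes.
	 */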
 272	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 273		if (adev->vcn.harvest_config & (1 << i))
 274			continue;
 275
 276		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 277						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
 278						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
 279		if (r) {
 280			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 281			return r;
 282		}
 283
 284		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 285				bo_size - fw_shared_size;
 286		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 287				bo_size - fw_shared_size;
 288
 289		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 290
 291		if (amdgpu_vcnfw_log) {
 292			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 293			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 294			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 295		}
 296
 297		if (adev->vcn.indirect_sram) {
 298			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 299					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
 300					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
 301			if (r) {
 302				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 303				return r;
 304			}
 305		}
 306	}
 307
 308	return 0;
 309}
 310
 311int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 312{
 313	int i, j;
 314
 315	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 316		if (adev->vcn.harvest_config & (1 << j))
 317			continue;
 318
 319		if (adev->vcn.indirect_sram) {
 320			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
 321						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
 322						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 323		}
 324		kvfree(adev->vcn.inst[j].saved_bo);
 325
 326		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 327					  &adev->vcn.inst[j].gpu_addr,
 328					  (void **)&adev->vcn.inst[j].cpu_addr);
 329
 330		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 331
 332		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 333			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 334	}
 335
 336	release_firmware(adev->vcn.fw);
 337	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 338	mutex_destroy(&adev->vcn.vcn_pg_lock);
 339
 340	return 0;
 341}
 342
  343/* From VCN 4 and above, only the unified queue is used. */
 344static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
 345{
 346	struct amdgpu_device *adev = ring->adev;
 347	bool ret = false;
 348
 349	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
 350		ret = true;
 351
 352	return ret;
 353}
 354
 355bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 356{
 357	bool ret = false;
 358	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 359
 360	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
 361		ret = true;
 362	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
 363		ret = true;
 364	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
 365		ret = true;
 366	}
 367
 368	return ret;
 369}
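/* vcn_config above carries per-instance configuration read out during IP
 * discovery; a set VCN_BLOCK_*_DISABLE_MASK bit indicates the corresponding
 * queue type is fused off, so callers skip creating that ring.
 */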
 370
 371int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 372{
 373	unsigned size;
 374	void *ptr;
 375	int i, idx;
 376
 377	cancel_delayed_work_sync(&adev->vcn.idle_work);
 378
 379	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 380		if (adev->vcn.harvest_config & (1 << i))
 381			continue;
 382		if (adev->vcn.inst[i].vcpu_bo == NULL)
 383			return 0;
 384
 385		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 386		ptr = adev->vcn.inst[i].cpu_addr;
 387
 388		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 389		if (!adev->vcn.inst[i].saved_bo)
 390			return -ENOMEM;
 391
 392		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 393			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 394			drm_dev_exit(idx);
 395		}
 396	}
 397	return 0;
 398}
 399
 400int amdgpu_vcn_resume(struct amdgpu_device *adev)
 401{
 402	unsigned size;
 403	void *ptr;
 404	int i, idx;
 405
 406	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 407		if (adev->vcn.harvest_config & (1 << i))
 408			continue;
 409		if (adev->vcn.inst[i].vcpu_bo == NULL)
 410			return -EINVAL;
 411
 412		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 413		ptr = adev->vcn.inst[i].cpu_addr;
 414
 415		if (adev->vcn.inst[i].saved_bo != NULL) {
 416			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 417				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 418				drm_dev_exit(idx);
 419			}
 420			kvfree(adev->vcn.inst[i].saved_bo);
 421			adev->vcn.inst[i].saved_bo = NULL;
 422		} else {
 423			const struct common_firmware_header *hdr;
 424			unsigned offset;
 425
 426			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 427			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 428				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 429				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 430					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
 431						    le32_to_cpu(hdr->ucode_size_bytes));
 432					drm_dev_exit(idx);
 433				}
 434				size -= le32_to_cpu(hdr->ucode_size_bytes);
 435				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 436			}
 437			memset_io(ptr, 0, size);
 438		}
 439	}
 440	return 0;
 441}
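/* Resume restores the VCPU BO either from the snapshot taken in
 * amdgpu_vcn_suspend() or, when no snapshot exists, by re-copying the ucode
 * image (non-PSP loads only) and zeroing the remainder of the BO.
 */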
 442
 443static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 444{
 445	struct amdgpu_device *adev =
 446		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 447	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 448	unsigned int i, j;
 449	int r = 0;
 450
 451	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 452		if (adev->vcn.harvest_config & (1 << j))
 453			continue;
 454
 455		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 456			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 457		}
 458
  459		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 460			struct dpg_pause_state new_state;
 461
 462			if (fence[j] ||
 463				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 464				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 465			else
 466				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 467
 468			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 469		}
 470
 471		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 472		fences += fence[j];
 473	}
 474
 475	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 476		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 477		       AMD_PG_STATE_GATE);
 478		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 479				false);
 480		if (r)
 481			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 482	} else {
 483		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 484	}
 485}
 486
 487void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 488{
 489	struct amdgpu_device *adev = ring->adev;
 490	int r = 0;
 491
 492	atomic_inc(&adev->vcn.total_submission_cnt);
 493
 494	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 495		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 496				true);
 497		if (r)
 498			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 499	}
 500
 501	mutex_lock(&adev->vcn.vcn_pg_lock);
 502	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 503	       AMD_PG_STATE_UNGATE);
 504
  505	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 506		struct dpg_pause_state new_state;
 507
 508		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 509			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 510			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 511		} else {
 512			unsigned int fences = 0;
 513			unsigned int i;
 514
 515			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 516				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 517
 518			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 519				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 520			else
 521				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 522		}
 523
 524		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 525	}
 526	mutex_unlock(&adev->vcn.vcn_pg_lock);
 527}
 528
 529void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 530{
 531	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 532		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 533		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 534
 535	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 536
 537	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 538}
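/* Taken together, begin_use/end_use implement the VCN power handshake:
 * begin_use cancels the idle timer (re-enabling the video power profile if
 * it had lapsed), ungates the block and, for DPG, recomputes the pause
 * state; end_use drops the submission counters and re-arms the timer, so
 * the idle worker above gates VCN only after all rings have been quiet for
 * VCN_IDLE_TIMEOUT.
 */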
 539
 540int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 541{
 542	struct amdgpu_device *adev = ring->adev;
 543	uint32_t tmp = 0;
 544	unsigned i;
 545	int r;
 546
 547	/* VCN in SRIOV does not support direct register read/write */
 548	if (amdgpu_sriov_vf(adev))
 549		return 0;
 550
 551	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 552	r = amdgpu_ring_alloc(ring, 3);
 553	if (r)
 554		return r;
 555	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 556	amdgpu_ring_write(ring, 0xDEADBEEF);
 557	amdgpu_ring_commit(ring);
 558	for (i = 0; i < adev->usec_timeout; i++) {
 559		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 560		if (tmp == 0xDEADBEEF)
 561			break;
 562		udelay(1);
 563	}
 564
 565	if (i >= adev->usec_timeout)
 566		r = -ETIMEDOUT;
 567
 568	return r;
 569}
 570
 571int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 572{
 573	struct amdgpu_device *adev = ring->adev;
 574	uint32_t rptr;
 575	unsigned int i;
 576	int r;
 577
 578	if (amdgpu_sriov_vf(adev))
 579		return 0;
 580
 581	r = amdgpu_ring_alloc(ring, 16);
 582	if (r)
 583		return r;
 584
 585	rptr = amdgpu_ring_get_rptr(ring);
 586
 587	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 588	amdgpu_ring_commit(ring);
 589
 590	for (i = 0; i < adev->usec_timeout; i++) {
 591		if (amdgpu_ring_get_rptr(ring) != rptr)
 592			break;
 593		udelay(1);
 594	}
 595
 596	if (i >= adev->usec_timeout)
 597		r = -ETIMEDOUT;
 598
 599	return r;
 600}
 601
 602static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 603				   struct amdgpu_ib *ib_msg,
 604				   struct dma_fence **fence)
 605{
 606	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 607	struct amdgpu_device *adev = ring->adev;
 608	struct dma_fence *f = NULL;
 609	struct amdgpu_job *job;
 610	struct amdgpu_ib *ib;
 611	int i, r;
 612
 613	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 614				     64, AMDGPU_IB_POOL_DIRECT,
 615				     &job);
 616	if (r)
 617		goto err;
 618
 619	ib = &job->ibs[0];
 620	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 621	ib->ptr[1] = addr;
 622	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 623	ib->ptr[3] = addr >> 32;
 624	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 625	ib->ptr[5] = 0;
 626	for (i = 6; i < 16; i += 2) {
 627		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 628		ib->ptr[i+1] = 0;
 629	}
 630	ib->length_dw = 16;
 631
 632	r = amdgpu_job_submit_direct(job, ring, &f);
 633	if (r)
 634		goto err_free;
 635
 636	amdgpu_ib_free(adev, ib_msg, f);
 637
 638	if (fence)
 639		*fence = dma_fence_get(f);
 640	dma_fence_put(f);
 641
 642	return 0;
 643
 644err_free:
 645	amdgpu_job_free(job);
 646err:
 647	amdgpu_ib_free(adev, ib_msg, f);
 648	return r;
 649}
 650
 651static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 652		struct amdgpu_ib *ib)
 653{
 654	struct amdgpu_device *adev = ring->adev;
 655	uint32_t *msg;
 656	int r, i;
 657
 658	memset(ib, 0, sizeof(*ib));
 659	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 660			AMDGPU_IB_POOL_DIRECT,
 661			ib);
 662	if (r)
 663		return r;
 664
 665	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 666	msg[0] = cpu_to_le32(0x00000028);
 667	msg[1] = cpu_to_le32(0x00000038);
 668	msg[2] = cpu_to_le32(0x00000001);
 669	msg[3] = cpu_to_le32(0x00000000);
 670	msg[4] = cpu_to_le32(handle);
 671	msg[5] = cpu_to_le32(0x00000000);
 672	msg[6] = cpu_to_le32(0x00000001);
 673	msg[7] = cpu_to_le32(0x00000028);
 674	msg[8] = cpu_to_le32(0x00000010);
 675	msg[9] = cpu_to_le32(0x00000000);
 676	msg[10] = cpu_to_le32(0x00000007);
 677	msg[11] = cpu_to_le32(0x00000000);
 678	msg[12] = cpu_to_le32(0x00000780);
 679	msg[13] = cpu_to_le32(0x00000440);
 680	for (i = 14; i < 1024; ++i)
 681		msg[i] = cpu_to_le32(0x0);
 682
 683	return 0;
 684}
 685
 686static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 687					  struct amdgpu_ib *ib)
 688{
 689	struct amdgpu_device *adev = ring->adev;
 690	uint32_t *msg;
 691	int r, i;
 692
 693	memset(ib, 0, sizeof(*ib));
 694	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 695			AMDGPU_IB_POOL_DIRECT,
 696			ib);
 697	if (r)
 698		return r;
 699
 700	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 701	msg[0] = cpu_to_le32(0x00000028);
 702	msg[1] = cpu_to_le32(0x00000018);
 703	msg[2] = cpu_to_le32(0x00000000);
 704	msg[3] = cpu_to_le32(0x00000002);
 705	msg[4] = cpu_to_le32(handle);
 706	msg[5] = cpu_to_le32(0x00000000);
 707	for (i = 6; i < 1024; ++i)
 708		msg[i] = cpu_to_le32(0x0);
 709
 710	return 0;
 711}
 712
 713int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 714{
 715	struct dma_fence *fence = NULL;
 716	struct amdgpu_ib ib;
 717	long r;
 718
 719	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 720	if (r)
 721		goto error;
 722
 723	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 724	if (r)
 725		goto error;
 726	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 727	if (r)
 728		goto error;
 729
 730	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 731	if (r)
 732		goto error;
 733
 734	r = dma_fence_wait_timeout(fence, false, timeout);
 735	if (r == 0)
 736		r = -ETIMEDOUT;
 737	else if (r > 0)
 738		r = 0;
 739
 740	dma_fence_put(fence);
 741error:
 742	return r;
 743}
 744
 745static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 746						uint32_t ib_pack_in_dw, bool enc)
 747{
 748	uint32_t *ib_checksum;
 749
 750	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 751	ib->ptr[ib->length_dw++] = 0x30000002;
 752	ib_checksum = &ib->ptr[ib->length_dw++];
 753	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 754
 755	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 756	ib->ptr[ib->length_dw++] = 0x30000001;
 757	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 758	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 759
 760	return ib_checksum;
 761}
 762
 763static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 764						uint32_t ib_pack_in_dw)
 765{
 766	uint32_t i;
 767	uint32_t checksum = 0;
 768
 769	for (i = 0; i < ib_pack_in_dw; i++)
 770		checksum += *(*ib_checksum + 2 + i);
 771
 772	**ib_checksum = checksum;
 773}
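/* These two helpers are used as a pair: a caller reserves the checksum
 * dword via amdgpu_vcn_unified_ring_ib_header() before packing its payload,
 * then seals the IB with amdgpu_vcn_unified_ring_ib_checksum(); the
 * checksum is a plain 32-bit sum of ib_pack_in_dw dwords starting two
 * dwords past the reserved slot, i.e. from the engine-info packet onwards.
 */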
 774
 775static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 776				      struct amdgpu_ib *ib_msg,
 777				      struct dma_fence **fence)
 778{
 779	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 780	unsigned int ib_size_dw = 64;
 781	struct amdgpu_device *adev = ring->adev;
 782	struct dma_fence *f = NULL;
 783	struct amdgpu_job *job;
 784	struct amdgpu_ib *ib;
 785	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 786	bool sq = amdgpu_vcn_using_unified_queue(ring);
 787	uint32_t *ib_checksum;
 788	uint32_t ib_pack_in_dw;
 789	int i, r;
 790
 791	if (sq)
 792		ib_size_dw += 8;
 793
 794	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 795				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 796				     &job);
 797	if (r)
 798		goto err;
 799
 800	ib = &job->ibs[0];
 801	ib->length_dw = 0;
 802
 803	/* single queue headers */
 804	if (sq) {
 805		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 806						+ 4 + 2; /* engine info + decoding ib in dw */
 807		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 808	}
 809
 810	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 811	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 812	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 813	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 814	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 815
 816	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 817	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 818	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 819
 820	for (i = ib->length_dw; i < ib_size_dw; ++i)
 821		ib->ptr[i] = 0x0;
 822
 823	if (sq)
 824		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 825
 826	r = amdgpu_job_submit_direct(job, ring, &f);
 827	if (r)
 828		goto err_free;
 829
 830	amdgpu_ib_free(adev, ib_msg, f);
 831
 832	if (fence)
 833		*fence = dma_fence_get(f);
 834	dma_fence_put(f);
 835
 836	return 0;
 837
 838err_free:
 839	amdgpu_job_free(job);
 840err:
 841	amdgpu_ib_free(adev, ib_msg, f);
 842	return r;
 843}
 844
 845int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 846{
 847	struct dma_fence *fence = NULL;
 848	struct amdgpu_ib ib;
 849	long r;
 850
 851	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 852	if (r)
 853		goto error;
 854
 855	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 856	if (r)
 857		goto error;
 858	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 859	if (r)
 860		goto error;
 861
 862	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 863	if (r)
 864		goto error;
 865
 866	r = dma_fence_wait_timeout(fence, false, timeout);
 867	if (r == 0)
 868		r = -ETIMEDOUT;
 869	else if (r > 0)
 870		r = 0;
 871
 872	dma_fence_put(fence);
 873error:
 874	return r;
 875}
 876
 877int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 878{
 879	struct amdgpu_device *adev = ring->adev;
 880	uint32_t rptr;
 881	unsigned i;
 882	int r;
 883
 884	if (amdgpu_sriov_vf(adev))
 885		return 0;
 886
 887	r = amdgpu_ring_alloc(ring, 16);
 888	if (r)
 889		return r;
 890
 891	rptr = amdgpu_ring_get_rptr(ring);
 892
 893	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 894	amdgpu_ring_commit(ring);
 895
 896	for (i = 0; i < adev->usec_timeout; i++) {
 897		if (amdgpu_ring_get_rptr(ring) != rptr)
 898			break;
 899		udelay(1);
 900	}
 901
 902	if (i >= adev->usec_timeout)
 903		r = -ETIMEDOUT;
 904
 905	return r;
 906}
 907
 908static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 909					 struct amdgpu_ib *ib_msg,
 910					 struct dma_fence **fence)
 911{
 912	unsigned int ib_size_dw = 16;
 913	struct amdgpu_job *job;
 914	struct amdgpu_ib *ib;
 915	struct dma_fence *f = NULL;
 916	uint32_t *ib_checksum = NULL;
 917	uint64_t addr;
 918	bool sq = amdgpu_vcn_using_unified_queue(ring);
 919	int i, r;
 920
 921	if (sq)
 922		ib_size_dw += 8;
 923
 924	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 925				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 926				     &job);
 927	if (r)
 928		return r;
 929
 930	ib = &job->ibs[0];
 931	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 932
 933	ib->length_dw = 0;
 934
 935	if (sq)
 936		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 937
 938	ib->ptr[ib->length_dw++] = 0x00000018;
 939	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 940	ib->ptr[ib->length_dw++] = handle;
 941	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 942	ib->ptr[ib->length_dw++] = addr;
 943	ib->ptr[ib->length_dw++] = 0x0000000b;
 944
 945	ib->ptr[ib->length_dw++] = 0x00000014;
 946	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 947	ib->ptr[ib->length_dw++] = 0x0000001c;
 948	ib->ptr[ib->length_dw++] = 0x00000000;
 949	ib->ptr[ib->length_dw++] = 0x00000000;
 950
 951	ib->ptr[ib->length_dw++] = 0x00000008;
 952	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 953
 954	for (i = ib->length_dw; i < ib_size_dw; ++i)
 955		ib->ptr[i] = 0x0;
 956
 957	if (sq)
 958		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 959
 960	r = amdgpu_job_submit_direct(job, ring, &f);
 961	if (r)
 962		goto err;
 963
 964	if (fence)
 965		*fence = dma_fence_get(f);
 966	dma_fence_put(f);
 967
 968	return 0;
 969
 970err:
 971	amdgpu_job_free(job);
 972	return r;
 973}
 974
 975static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 976					  struct amdgpu_ib *ib_msg,
 977					  struct dma_fence **fence)
 978{
 979	unsigned int ib_size_dw = 16;
 980	struct amdgpu_job *job;
 981	struct amdgpu_ib *ib;
 982	struct dma_fence *f = NULL;
 983	uint32_t *ib_checksum = NULL;
 984	uint64_t addr;
 985	bool sq = amdgpu_vcn_using_unified_queue(ring);
 986	int i, r;
 987
 988	if (sq)
 989		ib_size_dw += 8;
 990
 991	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 992				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 993				     &job);
 994	if (r)
 995		return r;
 996
 997	ib = &job->ibs[0];
 998	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 999
1000	ib->length_dw = 0;
1001
1002	if (sq)
1003		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
1004
1005	ib->ptr[ib->length_dw++] = 0x00000018;
1006	ib->ptr[ib->length_dw++] = 0x00000001;
1007	ib->ptr[ib->length_dw++] = handle;
1008	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1009	ib->ptr[ib->length_dw++] = addr;
1010	ib->ptr[ib->length_dw++] = 0x0000000b;
1011
1012	ib->ptr[ib->length_dw++] = 0x00000014;
1013	ib->ptr[ib->length_dw++] = 0x00000002;
1014	ib->ptr[ib->length_dw++] = 0x0000001c;
1015	ib->ptr[ib->length_dw++] = 0x00000000;
1016	ib->ptr[ib->length_dw++] = 0x00000000;
1017
1018	ib->ptr[ib->length_dw++] = 0x00000008;
1019	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
1020
1021	for (i = ib->length_dw; i < ib_size_dw; ++i)
1022		ib->ptr[i] = 0x0;
1023
1024	if (sq)
1025		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
1026
1027	r = amdgpu_job_submit_direct(job, ring, &f);
1028	if (r)
1029		goto err;
1030
1031	if (fence)
1032		*fence = dma_fence_get(f);
1033	dma_fence_put(f);
1034
1035	return 0;
1036
1037err:
1038	amdgpu_job_free(job);
1039	return r;
1040}
1041
1042int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1043{
1044	struct amdgpu_device *adev = ring->adev;
1045	struct dma_fence *fence = NULL;
1046	struct amdgpu_ib ib;
1047	long r;
1048
1049	memset(&ib, 0, sizeof(ib));
1050	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1051			AMDGPU_IB_POOL_DIRECT,
1052			&ib);
1053	if (r)
1054		return r;
1055
1056	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1057	if (r)
1058		goto error;
1059
1060	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1061	if (r)
1062		goto error;
1063
1064	r = dma_fence_wait_timeout(fence, false, timeout);
1065	if (r == 0)
1066		r = -ETIMEDOUT;
1067	else if (r > 0)
1068		r = 0;
1069
1070error:
1071	amdgpu_ib_free(adev, &ib, fence);
1072	dma_fence_put(fence);
1073
1074	return r;
1075}
1076
1077int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1078{
1079	long r;
1080
1081	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1082	if (r)
1083		goto error;
1084
1085	r =  amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1086
1087error:
1088	return r;
1089}
1090
1091enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1092{
1093	switch(ring) {
1094	case 0:
1095		return AMDGPU_RING_PRIO_0;
1096	case 1:
1097		return AMDGPU_RING_PRIO_1;
1098	case 2:
1099		return AMDGPU_RING_PRIO_2;
1100	default:
1101		return AMDGPU_RING_PRIO_0;
1102	}
1103}
1104
1105void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1106{
1107	int i;
1108	unsigned int idx;
1109
1110	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1111		const struct common_firmware_header *hdr;
1112		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1113
1114		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1115			if (adev->vcn.harvest_config & (1 << i))
1116				continue;
 1117			/* currently only 2 FW instances are supported */
 1118			if (i >= 2) {
 1119				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1120				break;
1121			}
1122			idx = AMDGPU_UCODE_ID_VCN + i;
1123			adev->firmware.ucode[idx].ucode_id = idx;
1124			adev->firmware.ucode[idx].fw = adev->vcn.fw;
1125			adev->firmware.fw_size +=
1126				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1127		}
1128		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1129	}
1130}
1131
1132/*
1133 * debugfs for mapping vcn firmware log buffer.
1134 */
1135#if defined(CONFIG_DEBUG_FS)
1136static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1137                                             size_t size, loff_t *pos)
1138{
1139	struct amdgpu_vcn_inst *vcn;
1140	void *log_buf;
1141	volatile struct amdgpu_vcn_fwlog *plog;
1142	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1143	unsigned int read_num[2] = {0};
1144
1145	vcn = file_inode(f)->i_private;
1146	if (!vcn)
1147		return -ENODEV;
1148
1149	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1150		return -EFAULT;
1151
1152	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1153
1154	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1155	read_pos = plog->rptr;
1156	write_pos = plog->wptr;
1157
1158	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1159		return -EFAULT;
1160
1161	if (!size || (read_pos == write_pos))
1162		return 0;
1163
1164	if (write_pos > read_pos) {
1165		available = write_pos - read_pos;
1166		read_num[0] = min(size, (size_t)available);
1167	} else {
1168		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1169		available = read_num[0] + write_pos - plog->header_size;
1170		if (size > available)
1171			read_num[1] = write_pos - plog->header_size;
1172		else if (size > read_num[0])
1173			read_num[1] = size - read_num[0];
1174		else
1175			read_num[0] = size;
1176	}
1177
1178	for (i = 0; i < 2; i++) {
1179		if (read_num[i]) {
1180			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1181				read_pos = plog->header_size;
1182			if (read_num[i] == copy_to_user((buf + read_bytes),
1183			                                (log_buf + read_pos), read_num[i]))
1184				return -EFAULT;
1185
1186			read_bytes += read_num[i];
1187			read_pos += read_num[i];
1188		}
1189	}
1190
1191	plog->rptr = read_pos;
1192	*pos += read_bytes;
1193	return read_bytes;
1194}
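/* Wrap-around sketch with hypothetical numbers: were AMDGPU_VCNFW_LOG_SIZE
 * 4096 with header_size 32, rptr 4000 and wptr 100, read_num[0] would cover
 * bytes 4000..4095 and read_num[1] the bytes from the header end up to the
 * write pointer, with read_pos rewound to header_size between the two
 * copies.
 */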
1195
1196static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1197	.owner = THIS_MODULE,
1198	.read = amdgpu_debugfs_vcn_fwlog_read,
1199	.llseek = default_llseek
1200};
1201#endif
1202
1203void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1204                                   struct amdgpu_vcn_inst *vcn)
1205{
1206#if defined(CONFIG_DEBUG_FS)
1207	struct drm_minor *minor = adev_to_drm(adev)->primary;
1208	struct dentry *root = minor->debugfs_root;
1209	char name[32];
1210
1211	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1212	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
1213				 &amdgpu_debugfs_vcnfwlog_fops,
1214				 AMDGPU_VCNFW_LOG_SIZE);
1215#endif
1216}
1217
1218void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1219{
1220#if defined(CONFIG_DEBUG_FS)
1221	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1222	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1223	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1224	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1225	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1226                                                         + vcn->fw_shared.log_offset;
1227	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1228	fw_log->is_enabled = 1;
1229	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1230	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1231	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1232
1233	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1234	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1235	log_buf->rptr = log_buf->header_size;
1236	log_buf->wptr = log_buf->header_size;
1237	log_buf->wrapped = 0;
1238#endif
1239}
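/* The log buffer lives directly behind the shared-memory block (see the
 * layout set up in amdgpu_vcn_sw_init()); fwlog_init advertises its GPU
 * address and size to the firmware through fw_shared and seeds rptr/wptr
 * just past the header, so the debugfs reader above starts out empty.
 */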
1240
1241int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1242				struct amdgpu_irq_src *source,
1243				struct amdgpu_iv_entry *entry)
1244{
1245	struct ras_common_if *ras_if = adev->vcn.ras_if;
1246	struct ras_dispatch_if ih_data = {
1247		.entry = entry,
1248	};
1249
1250	if (!ras_if)
1251		return 0;
1252
1253	ih_data.head = *ras_if;
1254	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1255
1256	return 0;
1257}
1258
1259void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
1260{
1261	if (!adev->vcn.ras)
1262		return;
1263
1264	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
1265
1266	strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
1267	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1268	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1269	adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
1270
 1271	/* If no special ras_late_init function is defined, use the default ras_late_init. */
1272	if (!adev->vcn.ras->ras_block.ras_late_init)
1273		adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
1274}
v5.4: drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
  1/*
  2 * Copyright 2016 Advanced Micro Devices, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26
 27#include <linux/firmware.h>
 28#include <linux/module.h>
 29#include <linux/pci.h>
 30
 31#include <drm/drm.h>
 32
 33#include "amdgpu.h"
 34#include "amdgpu_pm.h"
 35#include "amdgpu_vcn.h"
 36#include "soc15d.h"
 37#include "soc15_common.h"
 38
 39#include "vcn/vcn_1_0_offset.h"
 40#include "vcn/vcn_1_0_sh_mask.h"
 41
 42/* 1 second timeout */
 43#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 44
 45/* Firmware Names */
 46#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
 47#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
 48#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
  49#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
  50#define FIRMWARE_RENOIR	"amdgpu/renoir_vcn.bin"
  51#define FIRMWARE_NAVI10	"amdgpu/navi10_vcn.bin"
  52#define FIRMWARE_NAVI14	"amdgpu/navi14_vcn.bin"
  53#define FIRMWARE_NAVI12	"amdgpu/navi12_vcn.bin"
 54
 55MODULE_FIRMWARE(FIRMWARE_RAVEN);
 56MODULE_FIRMWARE(FIRMWARE_PICASSO);
 57MODULE_FIRMWARE(FIRMWARE_RAVEN2);
 58MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
 59MODULE_FIRMWARE(FIRMWARE_RENOIR);
 60MODULE_FIRMWARE(FIRMWARE_NAVI10);
 61MODULE_FIRMWARE(FIRMWARE_NAVI14);
 62MODULE_FIRMWARE(FIRMWARE_NAVI12);
 63
 64static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 65
 66int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 67{
 68	unsigned long bo_size;
 69	const char *fw_name;
 70	const struct common_firmware_header *hdr;
 71	unsigned char fw_check;
 72	int i, r;
 73
 74	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 75
 76	switch (adev->asic_type) {
 77	case CHIP_RAVEN:
 78		if (adev->rev_id >= 8)
 79			fw_name = FIRMWARE_RAVEN2;
 80		else if (adev->pdev->device == 0x15d8)
 81			fw_name = FIRMWARE_PICASSO;
 82		else
 83			fw_name = FIRMWARE_RAVEN;
 84		break;
 85	case CHIP_ARCTURUS:
 86		fw_name = FIRMWARE_ARCTURUS;
 87		break;
 88	case CHIP_RENOIR:
 89		fw_name = FIRMWARE_RENOIR;
 90		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 91		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 92			adev->vcn.indirect_sram = true;
 93		break;
 94	case CHIP_NAVI10:
 95		fw_name = FIRMWARE_NAVI10;
 96		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 97		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 98			adev->vcn.indirect_sram = true;
 99		break;
100	case CHIP_NAVI14:
101		fw_name = FIRMWARE_NAVI14;
102		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
103		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
104			adev->vcn.indirect_sram = true;
105		break;
106	case CHIP_NAVI12:
107		fw_name = FIRMWARE_NAVI12;
108		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
109		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
110			adev->vcn.indirect_sram = true;
111		break;
112	default:
113		return -EINVAL;
114	}
115
116	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
117	if (r) {
118		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
119			fw_name);
120		return r;
121	}
122
123	r = amdgpu_ucode_validate(adev->vcn.fw);
124	if (r) {
125		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
126			fw_name);
127		release_firmware(adev->vcn.fw);
128		adev->vcn.fw = NULL;
129		return r;
130	}
131
132	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
133	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
134
 135	/* Bits 20-23 hold the encode major version and are non-zero under the new
 136	 * naming convention. Under the old convention this field is part of version
 137	 * minor and DRM_DISABLED_FLAG; since the latest old-style version minor is
 138	 * 0x5B and DRM_DISABLED_FLAG is zero, the field is always zero there, so
 139	 * these four bits tell which naming convention is in use.
 140	 */
141	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
142	if (fw_check) {
143		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
144
145		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
146		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
147		enc_major = fw_check;
148		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
149		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 150		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
151			enc_major, enc_minor, dec_ver, vep, fw_rev);
152	} else {
153		unsigned int version_major, version_minor, family_id;
154
155		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
156		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
157		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 158		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
159			version_major, version_minor, family_id);
160	}
161
162	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
163	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
164		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
165
166	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
167		if (adev->vcn.harvest_config & (1 << i))
168			continue;
169
170		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
171						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
172						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
173		if (r) {
174			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
175			return r;
176		}
177	}
178
179	if (adev->vcn.indirect_sram) {
180		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
181			    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
182			    &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
183		if (r) {
184			dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
185			return r;
186		}
187	}
188
189	return 0;
190}
191
192int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
193{
194	int i, j;
195
196	if (adev->vcn.indirect_sram) {
197		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
198				      &adev->vcn.dpg_sram_gpu_addr,
199				      (void **)&adev->vcn.dpg_sram_cpu_addr);
200	}
201
202	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
203		if (adev->vcn.harvest_config & (1 << j))
204			continue;
205		kvfree(adev->vcn.inst[j].saved_bo);
206
207		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
208					  &adev->vcn.inst[j].gpu_addr,
209					  (void **)&adev->vcn.inst[j].cpu_addr);
210
211		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
212
213		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
214			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
215
216		amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
217	}
218
219	release_firmware(adev->vcn.fw);
220
221	return 0;
222}
223
224int amdgpu_vcn_suspend(struct amdgpu_device *adev)
225{
226	unsigned size;
227	void *ptr;
228	int i;
229
230	cancel_delayed_work_sync(&adev->vcn.idle_work);
231
232	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
233		if (adev->vcn.harvest_config & (1 << i))
234			continue;
235		if (adev->vcn.inst[i].vcpu_bo == NULL)
236			return 0;
237
238		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
239		ptr = adev->vcn.inst[i].cpu_addr;
240
241		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
242		if (!adev->vcn.inst[i].saved_bo)
243			return -ENOMEM;
244
245		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
246	}
247	return 0;
248}
249
250int amdgpu_vcn_resume(struct amdgpu_device *adev)
251{
252	unsigned size;
253	void *ptr;
254	int i;
255
256	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
257		if (adev->vcn.harvest_config & (1 << i))
258			continue;
259		if (adev->vcn.inst[i].vcpu_bo == NULL)
260			return -EINVAL;
261
262		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
263		ptr = adev->vcn.inst[i].cpu_addr;
264
265		if (adev->vcn.inst[i].saved_bo != NULL) {
266			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
267			kvfree(adev->vcn.inst[i].saved_bo);
268			adev->vcn.inst[i].saved_bo = NULL;
269		} else {
270			const struct common_firmware_header *hdr;
271			unsigned offset;
272
273			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
274			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
275				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
276				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
277					    le32_to_cpu(hdr->ucode_size_bytes));
278				size -= le32_to_cpu(hdr->ucode_size_bytes);
279				ptr += le32_to_cpu(hdr->ucode_size_bytes);
280			}
281			memset_io(ptr, 0, size);
282		}
283	}
284	return 0;
285}
286
287static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
288{
289	struct amdgpu_device *adev =
290		container_of(work, struct amdgpu_device, vcn.idle_work.work);
291	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
292	unsigned int i, j;
293
294	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
295		if (adev->vcn.harvest_config & (1 << j))
296			continue;
297		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
298			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
299		}
300
 301		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
302			struct dpg_pause_state new_state;
303
304			if (fence[j])
305				new_state.fw_based = VCN_DPG_STATE__PAUSE;
306			else
307				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
308
309			if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg))
310				new_state.jpeg = VCN_DPG_STATE__PAUSE;
311			else
312				new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
313
314			adev->vcn.pause_dpg_mode(adev, &new_state);
315		}
316
317		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg);
318		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
319		fences += fence[j];
320	}
321
322	if (fences == 0) {
323		amdgpu_gfx_off_ctrl(adev, true);
324		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
325			amdgpu_dpm_enable_uvd(adev, false);
326		else
327			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
328							       AMD_PG_STATE_GATE);
329	} else {
330		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
331	}
332}
333
334void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
335{
336	struct amdgpu_device *adev = ring->adev;
337	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
338
339	if (set_clocks) {
340		amdgpu_gfx_off_ctrl(adev, false);
341		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
342			amdgpu_dpm_enable_uvd(adev, true);
343		else
344			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
345							       AMD_PG_STATE_UNGATE);
346	}
347
 348	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
349		struct dpg_pause_state new_state;
350		unsigned int fences = 0;
351		unsigned int i;
352
353		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
354			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
355		}
356		if (fences)
357			new_state.fw_based = VCN_DPG_STATE__PAUSE;
358		else
359			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
360
361		if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
362			new_state.jpeg = VCN_DPG_STATE__PAUSE;
363		else
364			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
365
366		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
367			new_state.fw_based = VCN_DPG_STATE__PAUSE;
368		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
369			new_state.jpeg = VCN_DPG_STATE__PAUSE;
370
371		adev->vcn.pause_dpg_mode(adev, &new_state);
372	}
373}
374
375void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
376{
377	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
378}
379
380int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
381{
382	struct amdgpu_device *adev = ring->adev;
383	uint32_t tmp = 0;
384	unsigned i;
385	int r;
386
387	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
388	r = amdgpu_ring_alloc(ring, 3);
389	if (r)
390		return r;
391	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
392	amdgpu_ring_write(ring, 0xDEADBEEF);
393	amdgpu_ring_commit(ring);
394	for (i = 0; i < adev->usec_timeout; i++) {
395		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
396		if (tmp == 0xDEADBEEF)
397			break;
398		udelay(1);
399	}
400
401	if (i >= adev->usec_timeout)
402		r = -ETIMEDOUT;
403
404	return r;
405}
406
407static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
408				   struct amdgpu_bo *bo,
409				   struct dma_fence **fence)
410{
411	struct amdgpu_device *adev = ring->adev;
412	struct dma_fence *f = NULL;
413	struct amdgpu_job *job;
414	struct amdgpu_ib *ib;
415	uint64_t addr;
416	int i, r;
417
418	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
419	if (r)
420		goto err;
421
422	ib = &job->ibs[0];
423	addr = amdgpu_bo_gpu_offset(bo);
424	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
425	ib->ptr[1] = addr;
426	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
427	ib->ptr[3] = addr >> 32;
428	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
429	ib->ptr[5] = 0;
430	for (i = 6; i < 16; i += 2) {
431		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
432		ib->ptr[i+1] = 0;
433	}
434	ib->length_dw = 16;
435
436	r = amdgpu_job_submit_direct(job, ring, &f);
437	if (r)
438		goto err_free;
439
440	amdgpu_bo_fence(bo, f, false);
441	amdgpu_bo_unreserve(bo);
442	amdgpu_bo_unref(&bo);
443
444	if (fence)
445		*fence = dma_fence_get(f);
446	dma_fence_put(f);
447
448	return 0;
449
450err_free:
451	amdgpu_job_free(job);
452
453err:
454	amdgpu_bo_unreserve(bo);
455	amdgpu_bo_unref(&bo);
456	return r;
457}
458
459static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
460			      struct dma_fence **fence)
461{
462	struct amdgpu_device *adev = ring->adev;
463	struct amdgpu_bo *bo = NULL;
464	uint32_t *msg;
465	int r, i;
466
467	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
468				      AMDGPU_GEM_DOMAIN_VRAM,
469				      &bo, NULL, (void **)&msg);
470	if (r)
471		return r;
472
473	msg[0] = cpu_to_le32(0x00000028);
474	msg[1] = cpu_to_le32(0x00000038);
475	msg[2] = cpu_to_le32(0x00000001);
476	msg[3] = cpu_to_le32(0x00000000);
477	msg[4] = cpu_to_le32(handle);
478	msg[5] = cpu_to_le32(0x00000000);
479	msg[6] = cpu_to_le32(0x00000001);
480	msg[7] = cpu_to_le32(0x00000028);
481	msg[8] = cpu_to_le32(0x00000010);
482	msg[9] = cpu_to_le32(0x00000000);
483	msg[10] = cpu_to_le32(0x00000007);
484	msg[11] = cpu_to_le32(0x00000000);
485	msg[12] = cpu_to_le32(0x00000780);
486	msg[13] = cpu_to_le32(0x00000440);
487	for (i = 14; i < 1024; ++i)
488		msg[i] = cpu_to_le32(0x0);
489
490	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
491}
492
493static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
494			       struct dma_fence **fence)
495{
496	struct amdgpu_device *adev = ring->adev;
497	struct amdgpu_bo *bo = NULL;
498	uint32_t *msg;
499	int r, i;
500
501	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
502				      AMDGPU_GEM_DOMAIN_VRAM,
503				      &bo, NULL, (void **)&msg);
504	if (r)
505		return r;
506
507	msg[0] = cpu_to_le32(0x00000028);
508	msg[1] = cpu_to_le32(0x00000018);
509	msg[2] = cpu_to_le32(0x00000000);
510	msg[3] = cpu_to_le32(0x00000002);
511	msg[4] = cpu_to_le32(handle);
512	msg[5] = cpu_to_le32(0x00000000);
513	for (i = 6; i < 1024; ++i)
514		msg[i] = cpu_to_le32(0x0);
515
516	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
517}
518
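/**
 * amdgpu_vcn_dec_ring_test_ib - end-to-end IB test on a decode ring
 * @ring: the decode ring to test
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Send a create message followed by a destroy message for session
 * handle 1 and wait for the destroy submission to signal.
 *
 * Returns 0 on success, -ETIMEDOUT if the fence never signals, or a
 * negative error code from the message helpers.
 */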
519int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
520{
521	struct dma_fence *fence;
522	long r;
523
524	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
525	if (r)
526		goto error;
527
528	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
529	if (r)
530		goto error;
531
532	r = dma_fence_wait_timeout(fence, false, timeout);
533	if (r == 0)
534		r = -ETIMEDOUT;
535	else if (r > 0)
536		r = 0;
537
538	dma_fence_put(fence);
539error:
540	return r;
541}
542
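/**
 * amdgpu_vcn_enc_ring_test_ring - basic command test on an encode ring
 * @ring: the encode ring to test
 *
 * Rather than probing a scratch register, sample the read pointer,
 * submit a VCN_ENC_CMD_END, and poll for up to adev->usec_timeout
 * microseconds for the rptr to advance past the command.
 */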
543int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
544{
545	struct amdgpu_device *adev = ring->adev;
546	uint32_t rptr;
547	unsigned i;
548	int r;
549
550	r = amdgpu_ring_alloc(ring, 16);
551	if (r)
552		return r;
553
554	rptr = amdgpu_ring_get_rptr(ring);
555
556	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
557	amdgpu_ring_commit(ring);
558
559	for (i = 0; i < adev->usec_timeout; i++) {
560		if (amdgpu_ring_get_rptr(ring) != rptr)
561			break;
562		udelay(1);
563	}
564
565	if (i >= adev->usec_timeout)
566		r = -ETIMEDOUT;
567
568	return r;
569}
570
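/*
 * Build and directly submit a 16-dword encoder IB that opens session
 * @handle: a session-info block pointing at @bo, a task-info block,
 * and an "op initialize" command, padded with zeroes.  A reference to
 * the submission fence is optionally returned through @fence.
 */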
571static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
572					 struct amdgpu_bo *bo,
573					 struct dma_fence **fence)
574{
575	const unsigned ib_size_dw = 16;
576	struct amdgpu_job *job;
577	struct amdgpu_ib *ib;
578	struct dma_fence *f = NULL;
579	uint64_t addr;
580	int i, r;
581
582	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
583	if (r)
584		return r;
585
586	ib = &job->ibs[0];
587	addr = amdgpu_bo_gpu_offset(bo);
588
589	ib->length_dw = 0;
590	ib->ptr[ib->length_dw++] = 0x00000018;
591	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
592	ib->ptr[ib->length_dw++] = handle;
593	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
594	ib->ptr[ib->length_dw++] = addr;
595	ib->ptr[ib->length_dw++] = 0x0000000b;
596
597	ib->ptr[ib->length_dw++] = 0x00000014;
598	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
599	ib->ptr[ib->length_dw++] = 0x0000001c;
600	ib->ptr[ib->length_dw++] = 0x00000000;
601	ib->ptr[ib->length_dw++] = 0x00000000;
602
603	ib->ptr[ib->length_dw++] = 0x00000008;
604	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
605
606	for (i = ib->length_dw; i < ib_size_dw; ++i)
607		ib->ptr[i] = 0x0;
608
609	r = amdgpu_job_submit_direct(job, ring, &f);
610	if (r)
611		goto err;
612
613	if (fence)
614		*fence = dma_fence_get(f);
615	dma_fence_put(f);
616
617	return 0;
618
619err:
620	amdgpu_job_free(job);
621	return r;
622}
623
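/*
 * Counterpart to the create message above: identical layout, except
 * that the final command is 0x08000002 ("op close session") to tear
 * down session @handle.
 */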
624static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
625					  struct amdgpu_bo *bo,
626					  struct dma_fence **fence)
627{
628	const unsigned ib_size_dw = 16;
629	struct amdgpu_job *job;
630	struct amdgpu_ib *ib;
631	struct dma_fence *f = NULL;
632	uint64_t addr;
633	int i, r;
634
635	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
636	if (r)
637		return r;
638
639	ib = &job->ibs[0];
640	addr = amdgpu_bo_gpu_offset(bo);
641
642	ib->length_dw = 0;
643	ib->ptr[ib->length_dw++] = 0x00000018;
644	ib->ptr[ib->length_dw++] = 0x00000001;
645	ib->ptr[ib->length_dw++] = handle;
646	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
647	ib->ptr[ib->length_dw++] = addr;
648	ib->ptr[ib->length_dw++] = 0x0000000b;
649
650	ib->ptr[ib->length_dw++] = 0x00000014;
651	ib->ptr[ib->length_dw++] = 0x00000002;
652	ib->ptr[ib->length_dw++] = 0x0000001c;
653	ib->ptr[ib->length_dw++] = 0x00000000;
654	ib->ptr[ib->length_dw++] = 0x00000000;
655
656	ib->ptr[ib->length_dw++] = 0x00000008;
657	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
658
659	for (i = ib->length_dw; i < ib_size_dw; ++i)
660		ib->ptr[i] = 0x0;
661
662	r = amdgpu_job_submit_direct(job, ring, &f);
663	if (r)
664		goto err;
665
666	if (fence)
667		*fence = dma_fence_get(f);
668	dma_fence_put(f);
669
670	return 0;
671
672err:
673	amdgpu_job_free(job);
674	return r;
675}
676
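/**
 * amdgpu_vcn_enc_ring_test_ib - end-to-end IB test on an encode ring
 * @ring: the encode ring to test
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Allocate a 128KiB VRAM buffer for the session, open and close
 * session handle 1, and wait for the close submission to signal
 * before releasing the BO.
 */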
677int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
678{
679	struct dma_fence *fence = NULL;
680	struct amdgpu_bo *bo = NULL;
681	long r;
682
683	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
684				      AMDGPU_GEM_DOMAIN_VRAM,
685				      &bo, NULL, NULL);
686	if (r)
687		return r;
688
689	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
690	if (r)
691		goto error;
692
693	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
694	if (r)
695		goto error;
696
697	r = dma_fence_wait_timeout(fence, false, timeout);
698	if (r == 0)
699		r = -ETIMEDOUT;
700	else if (r > 0)
701		r = 0;
702
703error:
704	dma_fence_put(fence);
705	amdgpu_bo_unreserve(bo);
706	amdgpu_bo_unref(&bo);
707	return r;
708}
709
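/**
 * amdgpu_vcn_jpeg_ring_test_ring - basic register write/read test on a JPEG ring
 * @ring: the JPEG ring to test
 *
 * Same idea as the decode ring test, but using the jpeg_pitch
 * register: seed it with 0xCAFEDEAD, write 0xDEADBEEF through the
 * ring, and poll until the value lands or the timeout expires.
 */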
710int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
711{
712	struct amdgpu_device *adev = ring->adev;
713	uint32_t tmp = 0;
714	unsigned i;
715	int r;
716
717	WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
718	r = amdgpu_ring_alloc(ring, 3);
719	if (r)
720		return r;
721
722	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
723	amdgpu_ring_write(ring, 0xDEADBEEF);
724	amdgpu_ring_commit(ring);
725
726	for (i = 0; i < adev->usec_timeout; i++) {
727		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
728		if (tmp == 0xDEADBEEF)
729			break;
730		udelay(1);
731	}
732
733	if (i >= adev->usec_timeout)
734		r = -ETIMEDOUT;
735
736	return r;
737}
738
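/*
 * Submit a 16-dword JPEG IB that writes 0xDEADBEEF to the jpeg_pitch
 * register via a PACKETJ type-0 write, padded with type-6 NOPs.  The
 * submission fence is optionally returned through @fence so the
 * caller can wait for the write to actually execute.
 */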
739static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
740		struct dma_fence **fence)
741{
742	struct amdgpu_device *adev = ring->adev;
743	struct amdgpu_job *job;
744	struct amdgpu_ib *ib;
745	struct dma_fence *f = NULL;
746	const unsigned ib_size_dw = 16;
747	int i, r;
748
749	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
750	if (r)
751		return r;
752
753	ib = &job->ibs[0];
754
755	ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
756	ib->ptr[1] = 0xDEADBEEF;
757	for (i = 2; i < 16; i += 2) {
758		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
759		ib->ptr[i+1] = 0;
760	}
761	ib->length_dw = 16;
762
763	r = amdgpu_job_submit_direct(job, ring, &f);
764	if (r)
765		goto err;
766
767	if (fence)
768		*fence = dma_fence_get(f);
769	dma_fence_put(f);
770
771	return 0;
772
773err:
774	amdgpu_job_free(job);
775	return r;
776}
777
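/**
 * amdgpu_vcn_jpeg_ring_test_ib - end-to-end IB test on a JPEG ring
 * @ring: the JPEG ring to test
 * @timeout: how long to wait for the submission fence, in jiffies
 *
 * Submit the jpeg_pitch register write above, wait for its fence,
 * then poll the register for up to adev->usec_timeout microseconds to
 * confirm that 0xDEADBEEF was actually written by the engine.
 */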
778int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
779{
780	struct amdgpu_device *adev = ring->adev;
781	uint32_t tmp = 0;
782	unsigned i;
783	struct dma_fence *fence = NULL;
784	long r = 0;
785
786	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
787	if (r)
788		goto error;
789
790	r = dma_fence_wait_timeout(fence, false, timeout);
791	if (r == 0) {
792		r = -ETIMEDOUT;
793		goto error;
794	} else if (r < 0) {
795		goto error;
796	} else {
797		r = 0;
798	}
799
800	for (i = 0; i < adev->usec_timeout; i++) {
801		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
802		if (tmp == 0xDEADBEEF)
803			break;
804		udelay(1);
805	}
806
807	if (i >= adev->usec_timeout)
808		r = -ETIMEDOUT;
809
810error:
811	dma_fence_put(fence); /* NULL-safe, so the early-exit paths no longer leak the fence */
812	return r;
813}