   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/firmware.h>
  28#include <linux/module.h>
  29#include <linux/dmi.h>
  30#include <linux/pci.h>
  31#include <linux/debugfs.h>
  32#include <drm/drm_drv.h>
  33
  34#include "amdgpu.h"
  35#include "amdgpu_pm.h"
  36#include "amdgpu_vcn.h"
  37#include "soc15d.h"
  38
  39/* Firmware Names */
  40#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
  41#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
  42#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
  43#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
  44#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
  45#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
  46#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
  47#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
  48#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
  49#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
  50#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
  51#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
  52#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
  53#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
  54#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
  55#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
  56#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
  57#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
  58#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
  59#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
  60#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
  61#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
  62#define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
  63#define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
  64#define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
  65
  66MODULE_FIRMWARE(FIRMWARE_RAVEN);
  67MODULE_FIRMWARE(FIRMWARE_PICASSO);
  68MODULE_FIRMWARE(FIRMWARE_RAVEN2);
  69MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
  70MODULE_FIRMWARE(FIRMWARE_RENOIR);
  71MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
  72MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
  73MODULE_FIRMWARE(FIRMWARE_NAVI10);
  74MODULE_FIRMWARE(FIRMWARE_NAVI14);
  75MODULE_FIRMWARE(FIRMWARE_NAVI12);
  76MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
  77MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
  78MODULE_FIRMWARE(FIRMWARE_VANGOGH);
  79MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
  80MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
  81MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
  82MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
  83MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
  84MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
  85MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
  86MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
  87MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
  88MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
  89MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
  90MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
  91
  92static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  93
  94int amdgpu_vcn_early_init(struct amdgpu_device *adev)
  95{
  96	char ucode_prefix[30];
  97	char fw_name[40];
  98	int r, i;
  99
 100	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 101		amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
 102		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
 103		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
 104			i == 1) {
 105			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
 106		}
 107
 108		r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
 109		if (r) {
 110			amdgpu_ucode_release(&adev->vcn.fw[i]);
 111			return r;
 112		}
 113	}
 114	return 0;
 115}
 116
 117int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 118{
 119	unsigned long bo_size;
 120	const struct common_firmware_header *hdr;
 121	unsigned char fw_check;
 122	unsigned int fw_shared_size, log_offset;
 123	int i, r;
 124
 125	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 126	mutex_init(&adev->vcn.vcn_pg_lock);
 127	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
 128	atomic_set(&adev->vcn.total_submission_cnt, 0);
 129	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
 130		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 131
 132	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 133	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 134		adev->vcn.indirect_sram = true;
 135
 136	/*
 137	 * Some Steam Deck's BIOS versions are incompatible with the
 138	 * indirect SRAM mode, leading to amdgpu being unable to get
 139	 * properly probed (and even potentially crashing the kernel).
 140	 * Hence, check for these versions here - notice this is
 141	 * restricted to Vangogh (Deck's APU).
 142	 */
 143	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
 144		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
 145
 146		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
 147		     !strncmp("F7A0114", bios_ver, 7))) {
 148			adev->vcn.indirect_sram = false;
 149			dev_info(adev->dev,
 150				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
 151		}
 152	}
 153
 154	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 155	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 156
 157	/* Bits 20-23 hold the encode major version and are non-zero in the new
 158	 * naming convention. In the old naming convention this field is part of
 159	 * the version minor and DRM_DISABLED_FLAG; since the latest version minor
 160	 * is 0x5B and DRM_DISABLED_FLAG is zero there, this field has always been
 161	 * zero so far. These four bits therefore tell which naming convention is in use.
 162	 */
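	/*
	 * Illustrative example (not a real firmware version): a ucode_version of
	 * 0x21512034 would decode under the new convention as ENC 5.18, DEC 1,
	 * VEP 2, revision 52.
	 */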
 163	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
 164	if (fw_check) {
 165		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
 166
 167		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
 168		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
 169		enc_major = fw_check;
 170		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
 171		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
 172		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
 173			enc_major, enc_minor, dec_ver, vep, fw_rev);
 174	} else {
 175		unsigned int version_major, version_minor, family_id;
 176
 177		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 178		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 179		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 180		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
 181			version_major, version_minor, family_id);
 182	}
 183
 184	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 185	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 186		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 187
 188	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
 189		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
 190		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
 191	} else {
 192		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 193		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
 194	}
 195
 196	bo_size += fw_shared_size;
 197
 198	if (amdgpu_vcnfw_log)
 199		bo_size += AMDGPU_VCNFW_LOG_SIZE;
 200
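	/*
	 * Resulting per-instance VCPU BO layout: the firmware image (only when
	 * not loaded via PSP), the stack and context areas, the fw_shared
	 * region, and, when amdgpu_vcnfw_log is set, the firmware log at the
	 * very end.
	 */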
 201	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 202		if (adev->vcn.harvest_config & (1 << i))
 203			continue;
 204
 205		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 206					    AMDGPU_GEM_DOMAIN_VRAM |
 207					    AMDGPU_GEM_DOMAIN_GTT,
 208					    &adev->vcn.inst[i].vcpu_bo,
 209					    &adev->vcn.inst[i].gpu_addr,
 210					    &adev->vcn.inst[i].cpu_addr);
 211		if (r) {
 212			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
 213			return r;
 214		}
 215
 216		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
 217				bo_size - fw_shared_size;
 218		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
 219				bo_size - fw_shared_size;
 220
 221		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
 222
 223		if (amdgpu_vcnfw_log) {
 224			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 225			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
 226			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
 227		}
 228
 229		if (adev->vcn.indirect_sram) {
 230			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 231					AMDGPU_GEM_DOMAIN_VRAM |
 232					AMDGPU_GEM_DOMAIN_GTT,
 233					&adev->vcn.inst[i].dpg_sram_bo,
 234					&adev->vcn.inst[i].dpg_sram_gpu_addr,
 235					&adev->vcn.inst[i].dpg_sram_cpu_addr);
 236			if (r) {
 237				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
 238				return r;
 239			}
 240		}
 241	}
 242
 243	return 0;
 244}
 245
 246int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 247{
 248	int i, j;
 249
 250	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 251		if (adev->vcn.harvest_config & (1 << j))
 252			continue;
 253
 254		amdgpu_bo_free_kernel(
 255			&adev->vcn.inst[j].dpg_sram_bo,
 256			&adev->vcn.inst[j].dpg_sram_gpu_addr,
 257			(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
 258
 259		kvfree(adev->vcn.inst[j].saved_bo);
 260
 261		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
 262					  &adev->vcn.inst[j].gpu_addr,
 263					  (void **)&adev->vcn.inst[j].cpu_addr);
 264
 265		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
 266
 267		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 268			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
 269
 270		amdgpu_ucode_release(&adev->vcn.fw[j]);
 271	}
 272
 273	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 274	mutex_destroy(&adev->vcn.vcn_pg_lock);
 275
 276	return 0;
 277}
 278
 279/* From VCN4 and above, only the unified queue is used */
 280static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
 281{
 282	struct amdgpu_device *adev = ring->adev;
 283	bool ret = false;
 284
 285	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
 286		ret = true;
 287
 288	return ret;
 289}
 290
 291bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 292{
 293	bool ret = false;
 294	int vcn_config = adev->vcn.vcn_config[vcn_instance];
 295
 296	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
 297		ret = true;
 298	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
 299		ret = true;
 300	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
 301		ret = true;
 302
 303	return ret;
 304}
 305
 306int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 307{
 308	unsigned int size;
 309	void *ptr;
 310	int i, idx;
 311
 312	bool in_ras_intr = amdgpu_ras_intr_triggered();
 313
 314	cancel_delayed_work_sync(&adev->vcn.idle_work);
 315
 316	/* err_event_athub will corrupt VCPU buffer, so we need to
 317	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
 318	if (in_ras_intr)
 319		return 0;
 320
 321	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 322		if (adev->vcn.harvest_config & (1 << i))
 323			continue;
 324		if (adev->vcn.inst[i].vcpu_bo == NULL)
 325			return 0;
 326
 327		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 328		ptr = adev->vcn.inst[i].cpu_addr;
 329
 330		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
 331		if (!adev->vcn.inst[i].saved_bo)
 332			return -ENOMEM;
 333
 334		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 335			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 336			drm_dev_exit(idx);
 337		}
 338	}
 339	return 0;
 340}
 341
 342int amdgpu_vcn_resume(struct amdgpu_device *adev)
 343{
 344	unsigned int size;
 345	void *ptr;
 346	int i, idx;
 347
 348	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 349		if (adev->vcn.harvest_config & (1 << i))
 350			continue;
 351		if (adev->vcn.inst[i].vcpu_bo == NULL)
 352			return -EINVAL;
 353
 354		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
 355		ptr = adev->vcn.inst[i].cpu_addr;
 356
 357		if (adev->vcn.inst[i].saved_bo != NULL) {
 358			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 359				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 360				drm_dev_exit(idx);
 361			}
 362			kvfree(adev->vcn.inst[i].saved_bo);
 363			adev->vcn.inst[i].saved_bo = NULL;
 364		} else {
 365			const struct common_firmware_header *hdr;
 366			unsigned int offset;
 367
 368			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 369			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 370				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 371				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 372					memcpy_toio(adev->vcn.inst[i].cpu_addr,
 373						    adev->vcn.fw[i]->data + offset,
 374						    le32_to_cpu(hdr->ucode_size_bytes));
 375					drm_dev_exit(idx);
 376				}
 377				size -= le32_to_cpu(hdr->ucode_size_bytes);
 378				ptr += le32_to_cpu(hdr->ucode_size_bytes);
 379			}
 380			memset_io(ptr, 0, size);
 381		}
 382	}
 383	return 0;
 384}
 385
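/*
 * Delayed-work handler: count the fences still outstanding on every VCN
 * instance's decode and encode rings, update the DPG pause state per
 * instance, and, once everything is idle and no submission is in flight,
 * gate the VCN power domain and drop the video power profile; otherwise
 * re-arm the work after VCN_IDLE_TIMEOUT.
 */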
 386static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 387{
 388	struct amdgpu_device *adev =
 389		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 390	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
 391	unsigned int i, j;
 392	int r = 0;
 393
 394	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 395		if (adev->vcn.harvest_config & (1 << j))
 396			continue;
 397
 398		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 399			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 400
 401		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 402			struct dpg_pause_state new_state;
 403
 404			if (fence[j] ||
 405				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 406				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 407			else
 408				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 409
 410			adev->vcn.pause_dpg_mode(adev, j, &new_state);
 411		}
 412
 413		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 414		fences += fence[j];
 415	}
 416
 417	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 418		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 419		       AMD_PG_STATE_GATE);
 420		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 421				false);
 422		if (r)
 423			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
 424	} else {
 425		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 426	}
 427}
 428
 429void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 430{
 431	struct amdgpu_device *adev = ring->adev;
 432	int r = 0;
 433
 434	atomic_inc(&adev->vcn.total_submission_cnt);
 435
 436	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
 437		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 438				true);
 439		if (r)
 440			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
 441	}
 442
 443	mutex_lock(&adev->vcn.vcn_pg_lock);
 444	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 445	       AMD_PG_STATE_UNGATE);
 446
 447	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 448		struct dpg_pause_state new_state;
 449
 450		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
 451			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 452			new_state.fw_based = VCN_DPG_STATE__PAUSE;
 453		} else {
 454			unsigned int fences = 0;
 455			unsigned int i;
 456
 457			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 458				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
 459
 460			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
 461				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 462			else
 463				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 464		}
 465
 466		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 467	}
 468	mutex_unlock(&adev->vcn.vcn_pg_lock);
 469}
 470
 471void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 472{
 473	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
 474		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 475		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 476
 477	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 478
 479	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 480}
 481
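/*
 * Basic decode ring test: poison the instance's scratch9 register with
 * 0xCAFEDEAD, submit a small packet that writes 0xDEADBEEF to it, then poll
 * the register until the new value lands or usec_timeout expires.
 */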
 482int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 483{
 484	struct amdgpu_device *adev = ring->adev;
 485	uint32_t tmp = 0;
 486	unsigned int i;
 487	int r;
 488
 489	/* VCN in SRIOV does not support direct register read/write */
 490	if (amdgpu_sriov_vf(adev))
 491		return 0;
 492
 493	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 494	r = amdgpu_ring_alloc(ring, 3);
 495	if (r)
 496		return r;
 497	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
 498	amdgpu_ring_write(ring, 0xDEADBEEF);
 499	amdgpu_ring_commit(ring);
 500	for (i = 0; i < adev->usec_timeout; i++) {
 501		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
 502		if (tmp == 0xDEADBEEF)
 503			break;
 504		udelay(1);
 505	}
 506
 507	if (i >= adev->usec_timeout)
 508		r = -ETIMEDOUT;
 509
 510	return r;
 511}
 512
 513int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 514{
 515	struct amdgpu_device *adev = ring->adev;
 516	uint32_t rptr;
 517	unsigned int i;
 518	int r;
 519
 520	if (amdgpu_sriov_vf(adev))
 521		return 0;
 522
 523	r = amdgpu_ring_alloc(ring, 16);
 524	if (r)
 525		return r;
 526
 527	rptr = amdgpu_ring_get_rptr(ring);
 528
 529	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
 530	amdgpu_ring_commit(ring);
 531
 532	for (i = 0; i < adev->usec_timeout; i++) {
 533		if (amdgpu_ring_get_rptr(ring) != rptr)
 534			break;
 535		udelay(1);
 536	}
 537
 538	if (i >= adev->usec_timeout)
 539		r = -ETIMEDOUT;
 540
 541	return r;
 542}
 543
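/*
 * Build and directly submit a small IB that hands the page-aligned message
 * buffer address to the firmware through the data0/data1/cmd register writes,
 * padding the rest of the IB with NOPs; the message IB is freed against the
 * resulting fence.
 */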
 544static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 545				   struct amdgpu_ib *ib_msg,
 546				   struct dma_fence **fence)
 547{
 548	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 549	struct amdgpu_device *adev = ring->adev;
 550	struct dma_fence *f = NULL;
 551	struct amdgpu_job *job;
 552	struct amdgpu_ib *ib;
 553	int i, r;
 554
 555	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 556				     64, AMDGPU_IB_POOL_DIRECT,
 557				     &job);
 558	if (r)
 559		goto err;
 560
 561	ib = &job->ibs[0];
 562	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 563	ib->ptr[1] = addr;
 564	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
 565	ib->ptr[3] = addr >> 32;
 566	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
 567	ib->ptr[5] = 0;
 568	for (i = 6; i < 16; i += 2) {
 569		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
 570		ib->ptr[i+1] = 0;
 571	}
 572	ib->length_dw = 16;
 573
 574	r = amdgpu_job_submit_direct(job, ring, &f);
 575	if (r)
 576		goto err_free;
 577
 578	amdgpu_ib_free(adev, ib_msg, f);
 579
 580	if (fence)
 581		*fence = dma_fence_get(f);
 582	dma_fence_put(f);
 583
 584	return 0;
 585
 586err_free:
 587	amdgpu_job_free(job);
 588err:
 589	amdgpu_ib_free(adev, ib_msg, f);
 590	return r;
 591}
 592
 593static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 594		struct amdgpu_ib *ib)
 595{
 596	struct amdgpu_device *adev = ring->adev;
 597	uint32_t *msg;
 598	int r, i;
 599
 600	memset(ib, 0, sizeof(*ib));
 601	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 602			AMDGPU_IB_POOL_DIRECT,
 603			ib);
 604	if (r)
 605		return r;
 606
 607	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 608	msg[0] = cpu_to_le32(0x00000028);
 609	msg[1] = cpu_to_le32(0x00000038);
 610	msg[2] = cpu_to_le32(0x00000001);
 611	msg[3] = cpu_to_le32(0x00000000);
 612	msg[4] = cpu_to_le32(handle);
 613	msg[5] = cpu_to_le32(0x00000000);
 614	msg[6] = cpu_to_le32(0x00000001);
 615	msg[7] = cpu_to_le32(0x00000028);
 616	msg[8] = cpu_to_le32(0x00000010);
 617	msg[9] = cpu_to_le32(0x00000000);
 618	msg[10] = cpu_to_le32(0x00000007);
 619	msg[11] = cpu_to_le32(0x00000000);
 620	msg[12] = cpu_to_le32(0x00000780);
 621	msg[13] = cpu_to_le32(0x00000440);
 622	for (i = 14; i < 1024; ++i)
 623		msg[i] = cpu_to_le32(0x0);
 624
 625	return 0;
 626}
 627
 628static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 629					  struct amdgpu_ib *ib)
 630{
 631	struct amdgpu_device *adev = ring->adev;
 632	uint32_t *msg;
 633	int r, i;
 634
 635	memset(ib, 0, sizeof(*ib));
 636	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
 637			AMDGPU_IB_POOL_DIRECT,
 638			ib);
 639	if (r)
 640		return r;
 641
 642	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 643	msg[0] = cpu_to_le32(0x00000028);
 644	msg[1] = cpu_to_le32(0x00000018);
 645	msg[2] = cpu_to_le32(0x00000000);
 646	msg[3] = cpu_to_le32(0x00000002);
 647	msg[4] = cpu_to_le32(handle);
 648	msg[5] = cpu_to_le32(0x00000000);
 649	for (i = 6; i < 1024; ++i)
 650		msg[i] = cpu_to_le32(0x0);
 651
 652	return 0;
 653}
 654
 655int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 656{
 657	struct dma_fence *fence = NULL;
 658	struct amdgpu_ib ib;
 659	long r;
 660
 661	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 662	if (r)
 663		goto error;
 664
 665	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 666	if (r)
 667		goto error;
 668	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 669	if (r)
 670		goto error;
 671
 672	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 673	if (r)
 674		goto error;
 675
 676	r = dma_fence_wait_timeout(fence, false, timeout);
 677	if (r == 0)
 678		r = -ETIMEDOUT;
 679	else if (r > 0)
 680		r = 0;
 681
 682	dma_fence_put(fence);
 683error:
 684	return r;
 685}
 686
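/*
 * Reserve the single-queue IB header: a checksum packet followed by an
 * engine-info packet ('enc' selects 0x2 for encode, 0x3 for decode).  A
 * pointer to the checksum dword is returned so it can be filled in by
 * amdgpu_vcn_unified_ring_ib_checksum() once the IB payload is complete.
 */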
 687static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 688						uint32_t ib_pack_in_dw, bool enc)
 689{
 690	uint32_t *ib_checksum;
 691
 692	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
 693	ib->ptr[ib->length_dw++] = 0x30000002;
 694	ib_checksum = &ib->ptr[ib->length_dw++];
 695	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
 696
 697	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
 698	ib->ptr[ib->length_dw++] = 0x30000001;
 699	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
 700	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
 701
 702	return ib_checksum;
 703}
 704
 705static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
 706						uint32_t ib_pack_in_dw)
 707{
 708	uint32_t i;
 709	uint32_t checksum = 0;
 710
 711	for (i = 0; i < ib_pack_in_dw; i++)
 712		checksum += *(*ib_checksum + 2 + i);
 713
 714	**ib_checksum = checksum;
 715}
 716
 717static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 718				      struct amdgpu_ib *ib_msg,
 719				      struct dma_fence **fence)
 720{
 721	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 722	unsigned int ib_size_dw = 64;
 723	struct amdgpu_device *adev = ring->adev;
 724	struct dma_fence *f = NULL;
 725	struct amdgpu_job *job;
 726	struct amdgpu_ib *ib;
 727	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 728	bool sq = amdgpu_vcn_using_unified_queue(ring);
 729	uint32_t *ib_checksum;
 730	uint32_t ib_pack_in_dw;
 731	int i, r;
 732
 733	if (sq)
 734		ib_size_dw += 8;
 735
 736	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 737				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 738				     &job);
 739	if (r)
 740		goto err;
 741
 742	ib = &job->ibs[0];
 743	ib->length_dw = 0;
 744
 745	/* single queue headers */
 746	if (sq) {
 747		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 748						+ 4 + 2; /* engine info + decoding ib in dw */
 749		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 750	}
 751
 752	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 753	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 754	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
 755	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
 756	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
 757
 758	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
 759	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
 760	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
 761
 762	for (i = ib->length_dw; i < ib_size_dw; ++i)
 763		ib->ptr[i] = 0x0;
 764
 765	if (sq)
 766		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 767
 768	r = amdgpu_job_submit_direct(job, ring, &f);
 769	if (r)
 770		goto err_free;
 771
 772	amdgpu_ib_free(adev, ib_msg, f);
 773
 774	if (fence)
 775		*fence = dma_fence_get(f);
 776	dma_fence_put(f);
 777
 778	return 0;
 779
 780err_free:
 781	amdgpu_job_free(job);
 782err:
 783	amdgpu_ib_free(adev, ib_msg, f);
 784	return r;
 785}
 786
 787int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 788{
 789	struct dma_fence *fence = NULL;
 790	struct amdgpu_ib ib;
 791	long r;
 792
 793	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 794	if (r)
 795		goto error;
 796
 797	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 798	if (r)
 799		goto error;
 800	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 801	if (r)
 802		goto error;
 803
 804	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 805	if (r)
 806		goto error;
 807
 808	r = dma_fence_wait_timeout(fence, false, timeout);
 809	if (r == 0)
 810		r = -ETIMEDOUT;
 811	else if (r > 0)
 812		r = 0;
 813
 814	dma_fence_put(fence);
 815error:
 816	return r;
 817}
 818
 819int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 820{
 821	struct amdgpu_device *adev = ring->adev;
 822	uint32_t rptr;
 823	unsigned int i;
 824	int r;
 825
 826	if (amdgpu_sriov_vf(adev))
 827		return 0;
 828
 829	r = amdgpu_ring_alloc(ring, 16);
 830	if (r)
 831		return r;
 832
 833	rptr = amdgpu_ring_get_rptr(ring);
 834
 835	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 836	amdgpu_ring_commit(ring);
 837
 838	for (i = 0; i < adev->usec_timeout; i++) {
 839		if (amdgpu_ring_get_rptr(ring) != rptr)
 840			break;
 841		udelay(1);
 842	}
 843
 844	if (i >= adev->usec_timeout)
 845		r = -ETIMEDOUT;
 846
 847	return r;
 848}
 849
 850static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 851					 struct amdgpu_ib *ib_msg,
 852					 struct dma_fence **fence)
 853{
 854	unsigned int ib_size_dw = 16;
 855	struct amdgpu_job *job;
 856	struct amdgpu_ib *ib;
 857	struct dma_fence *f = NULL;
 858	uint32_t *ib_checksum = NULL;
 859	uint64_t addr;
 860	bool sq = amdgpu_vcn_using_unified_queue(ring);
 861	int i, r;
 862
 863	if (sq)
 864		ib_size_dw += 8;
 865
 866	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 867				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 868				     &job);
 869	if (r)
 870		return r;
 871
 872	ib = &job->ibs[0];
 873	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 874
 875	ib->length_dw = 0;
 876
 877	if (sq)
 878		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 879
 880	ib->ptr[ib->length_dw++] = 0x00000018;
 881	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 882	ib->ptr[ib->length_dw++] = handle;
 883	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 884	ib->ptr[ib->length_dw++] = addr;
 885	ib->ptr[ib->length_dw++] = 0x0000000b;
 886
 887	ib->ptr[ib->length_dw++] = 0x00000014;
 888	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 889	ib->ptr[ib->length_dw++] = 0x0000001c;
 890	ib->ptr[ib->length_dw++] = 0x00000000;
 891	ib->ptr[ib->length_dw++] = 0x00000000;
 892
 893	ib->ptr[ib->length_dw++] = 0x00000008;
 894	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 895
 896	for (i = ib->length_dw; i < ib_size_dw; ++i)
 897		ib->ptr[i] = 0x0;
 898
 899	if (sq)
 900		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 901
 902	r = amdgpu_job_submit_direct(job, ring, &f);
 903	if (r)
 904		goto err;
 905
 906	if (fence)
 907		*fence = dma_fence_get(f);
 908	dma_fence_put(f);
 909
 910	return 0;
 911
 912err:
 913	amdgpu_job_free(job);
 914	return r;
 915}
 916
 917static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 918					  struct amdgpu_ib *ib_msg,
 919					  struct dma_fence **fence)
 920{
 921	unsigned int ib_size_dw = 16;
 922	struct amdgpu_job *job;
 923	struct amdgpu_ib *ib;
 924	struct dma_fence *f = NULL;
 925	uint32_t *ib_checksum = NULL;
 926	uint64_t addr;
 927	bool sq = amdgpu_vcn_using_unified_queue(ring);
 928	int i, r;
 929
 930	if (sq)
 931		ib_size_dw += 8;
 932
 933	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
 934				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 935				     &job);
 936	if (r)
 937		return r;
 938
 939	ib = &job->ibs[0];
 940	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 941
 942	ib->length_dw = 0;
 943
 944	if (sq)
 945		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 946
 947	ib->ptr[ib->length_dw++] = 0x00000018;
 948	ib->ptr[ib->length_dw++] = 0x00000001;
 949	ib->ptr[ib->length_dw++] = handle;
 950	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 951	ib->ptr[ib->length_dw++] = addr;
 952	ib->ptr[ib->length_dw++] = 0x0000000b;
 953
 954	ib->ptr[ib->length_dw++] = 0x00000014;
 955	ib->ptr[ib->length_dw++] = 0x00000002;
 956	ib->ptr[ib->length_dw++] = 0x0000001c;
 957	ib->ptr[ib->length_dw++] = 0x00000000;
 958	ib->ptr[ib->length_dw++] = 0x00000000;
 959
 960	ib->ptr[ib->length_dw++] = 0x00000008;
 961	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 962
 963	for (i = ib->length_dw; i < ib_size_dw; ++i)
 964		ib->ptr[i] = 0x0;
 965
 966	if (sq)
 967		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 968
 969	r = amdgpu_job_submit_direct(job, ring, &f);
 970	if (r)
 971		goto err;
 972
 973	if (fence)
 974		*fence = dma_fence_get(f);
 975	dma_fence_put(f);
 976
 977	return 0;
 978
 979err:
 980	amdgpu_job_free(job);
 981	return r;
 982}
 983
 984int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 985{
 986	struct amdgpu_device *adev = ring->adev;
 987	struct dma_fence *fence = NULL;
 988	struct amdgpu_ib ib;
 989	long r;
 990
 991	memset(&ib, 0, sizeof(ib));
 992	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
 993			AMDGPU_IB_POOL_DIRECT,
 994			&ib);
 995	if (r)
 996		return r;
 997
 998	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
 999	if (r)
1000		goto error;
1001
1002	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1003	if (r)
1004		goto error;
1005
1006	r = dma_fence_wait_timeout(fence, false, timeout);
1007	if (r == 0)
1008		r = -ETIMEDOUT;
1009	else if (r > 0)
1010		r = 0;
1011
1012error:
1013	amdgpu_ib_free(adev, &ib, fence);
1014	dma_fence_put(fence);
1015
1016	return r;
1017}
1018
1019int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1020{
1021	struct amdgpu_device *adev = ring->adev;
1022	long r;
1023
1024	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
1025		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1026		if (r)
1027			goto error;
1028	}
1029
1030	r =  amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1031
1032error:
1033	return r;
1034}
1035
1036enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1037{
1038	switch (ring) {
1039	case 0:
1040		return AMDGPU_RING_PRIO_0;
1041	case 1:
1042		return AMDGPU_RING_PRIO_1;
1043	case 2:
1044		return AMDGPU_RING_PRIO_2;
1045	default:
1046		return AMDGPU_RING_PRIO_0;
1047	}
1048}
1049
1050void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1051{
1052	int i;
1053	unsigned int idx;
1054
1055	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1056		const struct common_firmware_header *hdr;
1057
1058		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1059			if (adev->vcn.harvest_config & (1 << i))
1060				continue;
1061
1062			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
1063			/* currently only support 2 FW instances */
1064			if (i >= 2) {
1065				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1066				break;
1067			}
1068			idx = AMDGPU_UCODE_ID_VCN + i;
1069			adev->firmware.ucode[idx].ucode_id = idx;
1070			adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
1071			adev->firmware.fw_size +=
1072				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1073
1074			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
1075			    IP_VERSION(4, 0, 3))
1076				break;
1077		}
1078		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1079	}
1080}
1081
1082/*
1083 * debugfs for mapping vcn firmware log buffer.
1084 */
1085#if defined(CONFIG_DEBUG_FS)
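/*
 * The firmware log is a ring buffer appended after the fw_shared region.  A
 * read may be split into two chunks to handle wrap-around: first from rptr to
 * the end of the buffer, then from just past the header up to wptr; rptr is
 * advanced by the amount actually copied.
 */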
1086static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1087					     size_t size, loff_t *pos)
1088{
1089	struct amdgpu_vcn_inst *vcn;
1090	void *log_buf;
1091	volatile struct amdgpu_vcn_fwlog *plog;
1092	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1093	unsigned int read_num[2] = {0};
1094
1095	vcn = file_inode(f)->i_private;
1096	if (!vcn)
1097		return -ENODEV;
1098
1099	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1100		return -EFAULT;
1101
1102	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1103
1104	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1105	read_pos = plog->rptr;
1106	write_pos = plog->wptr;
1107
1108	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1109		return -EFAULT;
1110
1111	if (!size || (read_pos == write_pos))
1112		return 0;
1113
1114	if (write_pos > read_pos) {
1115		available = write_pos - read_pos;
1116		read_num[0] = min_t(size_t, size, available);
1117	} else {
1118		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1119		available = read_num[0] + write_pos - plog->header_size;
1120		if (size > available)
1121			read_num[1] = write_pos - plog->header_size;
1122		else if (size > read_num[0])
1123			read_num[1] = size - read_num[0];
1124		else
1125			read_num[0] = size;
1126	}
1127
1128	for (i = 0; i < 2; i++) {
1129		if (read_num[i]) {
1130			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1131				read_pos = plog->header_size;
1132			if (read_num[i] == copy_to_user((buf + read_bytes),
1133							(log_buf + read_pos), read_num[i]))
1134				return -EFAULT;
1135
1136			read_bytes += read_num[i];
1137			read_pos += read_num[i];
1138		}
1139	}
1140
1141	plog->rptr = read_pos;
1142	*pos += read_bytes;
1143	return read_bytes;
1144}
1145
1146static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1147	.owner = THIS_MODULE,
1148	.read = amdgpu_debugfs_vcn_fwlog_read,
1149	.llseek = default_llseek
1150};
1151#endif
1152
1153void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1154				   struct amdgpu_vcn_inst *vcn)
1155{
1156#if defined(CONFIG_DEBUG_FS)
1157	struct drm_minor *minor = adev_to_drm(adev)->primary;
1158	struct dentry *root = minor->debugfs_root;
1159	char name[32];
1160
1161	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1162	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
1163				 &amdgpu_debugfs_vcnfwlog_fops,
1164				 AMDGPU_VCNFW_LOG_SIZE);
1165#endif
1166}
1167
1168void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1169{
1170#if defined(CONFIG_DEBUG_FS)
1171	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1172	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1173	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1174	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1175	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1176							 + vcn->fw_shared.log_offset;
1177	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1178	fw_log->is_enabled = 1;
1179	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1180	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1181	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1182
1183	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1184	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1185	log_buf->rptr = log_buf->header_size;
1186	log_buf->wptr = log_buf->header_size;
1187	log_buf->wrapped = 0;
1188#endif
1189}
1190
1191int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1192				struct amdgpu_irq_src *source,
1193				struct amdgpu_iv_entry *entry)
1194{
1195	struct ras_common_if *ras_if = adev->vcn.ras_if;
1196	struct ras_dispatch_if ih_data = {
1197		.entry = entry,
1198	};
1199
1200	if (!ras_if)
1201		return 0;
1202
1203	if (!amdgpu_sriov_vf(adev)) {
1204		ih_data.head = *ras_if;
1205		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1206	} else {
1207		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
1208			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
1209		else
1210			dev_warn(adev->dev,
1211				"No ras_poison_handler interface in SRIOV for VCN!\n");
1212	}
1213
1214	return 0;
1215}
1216
1217int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
1218{
1219	int r, i;
1220
1221	r = amdgpu_ras_block_late_init(adev, ras_block);
1222	if (r)
1223		return r;
1224
1225	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
1226		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1227			if (adev->vcn.harvest_config & (1 << i) ||
1228			    !adev->vcn.inst[i].ras_poison_irq.funcs)
1229				continue;
1230
1231			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
1232			if (r)
1233				goto late_fini;
1234		}
1235	}
1236	return 0;
1237
1238late_fini:
1239	amdgpu_ras_block_late_fini(adev, ras_block);
1240	return r;
1241}
1242
1243int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
1244{
1245	int err;
1246	struct amdgpu_vcn_ras *ras;
1247
1248	if (!adev->vcn.ras)
1249		return 0;
1250
1251	ras = adev->vcn.ras;
1252	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1253	if (err) {
1254		dev_err(adev->dev, "Failed to register vcn ras block!\n");
1255		return err;
1256	}
1257
1258	strcpy(ras->ras_block.ras_comm.name, "vcn");
1259	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
1260	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
1261	adev->vcn.ras_if = &ras->ras_block.ras_comm;
1262
1263	if (!ras->ras_block.ras_late_init)
1264		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
1265
1266	return 0;
1267}
1268
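/*
 * Hand the DPG indirect-SRAM image staged in dpg_sram_bo to the PSP for
 * loading; the size is the number of bytes the dpg_sram_curr_addr cursor has
 * advanced past dpg_sram_cpu_addr, and when no explicit ucode_id is given the
 * VCN0/VCN1 RAM target is picked from the instance index.
 */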
1269int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
1270			       enum AMDGPU_UCODE_ID ucode_id)
1271{
1272	struct amdgpu_firmware_info ucode = {
1273		.ucode_id = (ucode_id ? ucode_id :
1274			    (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1275					AMDGPU_UCODE_ID_VCN0_RAM)),
1276		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
1277		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
1278			      (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
1279	};
1280
1281	return psp_execute_ip_fw_load(&adev->psp, &ucode);
1282}