v5.4
   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 * Authors: Christian König <christian.koenig@amd.com>
  26 */
  27
  28#include <linux/firmware.h>
  29#include <linux/module.h>
  30
  31#include <drm/drm.h>
  32
  33#include "amdgpu.h"
  34#include "amdgpu_pm.h"
  35#include "amdgpu_vce.h"
  36#include "cikd.h"
  37
  38/* 1 second timeout */
  39#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)
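/*
 * Note: msecs_to_jiffies(1000) depends on CONFIG_HZ (e.g. 250 jiffies at
 * HZ=250).  The idle worker below re-arms itself with this delay until no
 * VCE fences remain outstanding, and only then powers the block down.
 */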
  40
  41/* Firmware Names */
  42#ifdef CONFIG_DRM_AMDGPU_CIK
  43#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
  44#define FIRMWARE_KABINI	"amdgpu/kabini_vce.bin"
  45#define FIRMWARE_KAVERI	"amdgpu/kaveri_vce.bin"
  46#define FIRMWARE_HAWAII	"amdgpu/hawaii_vce.bin"
  47#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
  48#endif
  49#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
  50#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
  51#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
  52#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
  53#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
  54#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
  55#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
  56#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"
  57
  58#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
  59#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
  60#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"
  61
  62#ifdef CONFIG_DRM_AMDGPU_CIK
  63MODULE_FIRMWARE(FIRMWARE_BONAIRE);
  64MODULE_FIRMWARE(FIRMWARE_KABINI);
  65MODULE_FIRMWARE(FIRMWARE_KAVERI);
  66MODULE_FIRMWARE(FIRMWARE_HAWAII);
  67MODULE_FIRMWARE(FIRMWARE_MULLINS);
  68#endif
  69MODULE_FIRMWARE(FIRMWARE_TONGA);
  70MODULE_FIRMWARE(FIRMWARE_CARRIZO);
  71MODULE_FIRMWARE(FIRMWARE_FIJI);
  72MODULE_FIRMWARE(FIRMWARE_STONEY);
  73MODULE_FIRMWARE(FIRMWARE_POLARIS10);
  74MODULE_FIRMWARE(FIRMWARE_POLARIS11);
  75MODULE_FIRMWARE(FIRMWARE_POLARIS12);
  76MODULE_FIRMWARE(FIRMWARE_VEGAM);
  77
  78MODULE_FIRMWARE(FIRMWARE_VEGA10);
  79MODULE_FIRMWARE(FIRMWARE_VEGA12);
  80MODULE_FIRMWARE(FIRMWARE_VEGA20);
  81
  82static void amdgpu_vce_idle_work_handler(struct work_struct *work);
  83
  84/**
   85 * amdgpu_vce_sw_init - allocate memory, load vce firmware
  86 *
  87 * @adev: amdgpu_device pointer
  88 *
  89 * First step to get VCE online, allocate memory and load the firmware
  90 */
  91int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
  92{
  93	const char *fw_name;
  94	const struct common_firmware_header *hdr;
  95	unsigned ucode_version, version_major, version_minor, binary_id;
  96	int i, r;
  97
  98	switch (adev->asic_type) {
  99#ifdef CONFIG_DRM_AMDGPU_CIK
 100	case CHIP_BONAIRE:
 101		fw_name = FIRMWARE_BONAIRE;
 102		break;
 103	case CHIP_KAVERI:
 104		fw_name = FIRMWARE_KAVERI;
 105		break;
 106	case CHIP_KABINI:
 107		fw_name = FIRMWARE_KABINI;
 108		break;
 109	case CHIP_HAWAII:
 110		fw_name = FIRMWARE_HAWAII;
 111		break;
 112	case CHIP_MULLINS:
 113		fw_name = FIRMWARE_MULLINS;
 114		break;
 115#endif
 116	case CHIP_TONGA:
 117		fw_name = FIRMWARE_TONGA;
 118		break;
 119	case CHIP_CARRIZO:
 120		fw_name = FIRMWARE_CARRIZO;
 121		break;
 122	case CHIP_FIJI:
 123		fw_name = FIRMWARE_FIJI;
 124		break;
 125	case CHIP_STONEY:
 126		fw_name = FIRMWARE_STONEY;
 127		break;
 128	case CHIP_POLARIS10:
 129		fw_name = FIRMWARE_POLARIS10;
 130		break;
 131	case CHIP_POLARIS11:
 132		fw_name = FIRMWARE_POLARIS11;
 133		break;
 134	case CHIP_POLARIS12:
 135		fw_name = FIRMWARE_POLARIS12;
 136		break;
 137	case CHIP_VEGAM:
 138		fw_name = FIRMWARE_VEGAM;
 139		break;
 140	case CHIP_VEGA10:
 141		fw_name = FIRMWARE_VEGA10;
 142		break;
 143	case CHIP_VEGA12:
 144		fw_name = FIRMWARE_VEGA12;
 145		break;
 146	case CHIP_VEGA20:
 147		fw_name = FIRMWARE_VEGA20;
 148		break;
 149
 150	default:
 151		return -EINVAL;
 152	}
 153
 154	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
 155	if (r) {
 156		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
 157			fw_name);
 158		return r;
 159	}
 160
 161	r = amdgpu_ucode_validate(adev->vce.fw);
 162	if (r) {
 163		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
 164			fw_name);
 165		release_firmware(adev->vce.fw);
 166		adev->vce.fw = NULL;
 167		return r;
 168	}
 169
 170	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 171
 172	ucode_version = le32_to_cpu(hdr->ucode_version);
 173	version_major = (ucode_version >> 20) & 0xfff;
 174	version_minor = (ucode_version >> 8) & 0xfff;
 175	binary_id = ucode_version & 0xff;
 176	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
 177		version_major, version_minor, binary_id);
 178	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
 179				(binary_id << 8));
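	/*
	 * Illustrative example (hypothetical value): a ucode_version of
	 * 0x03401234 decodes to major 0x034 (52), minor 0x012 (18) and
	 * binary_id 0x34 (52), so fw_version becomes
	 * (52 << 24) | (18 << 16) | (52 << 8) = 0x34123400.  fw_version >> 24
	 * is the major version compared against 52 in the create msg below.
	 */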
 180
 181	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 182				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
 183				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
 184	if (r) {
 185		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 186		return r;
 187	}
 188
 189	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 190		atomic_set(&adev->vce.handles[i], 0);
 191		adev->vce.filp[i] = NULL;
 192	}
 193
 194	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
 195	mutex_init(&adev->vce.idle_mutex);
 196
 197	return 0;
 198}
 199
 200/**
  201 * amdgpu_vce_sw_fini - free memory
 202 *
 203 * @adev: amdgpu_device pointer
 204 *
 205 * Last step on VCE teardown, free firmware memory
 206 */
 207int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 208{
 209	unsigned i;
 210
 211	if (adev->vce.vcpu_bo == NULL)
 212		return 0;
 213
 214	drm_sched_entity_destroy(&adev->vce.entity);
 215
 216	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 217		(void **)&adev->vce.cpu_addr);
 218
 219	for (i = 0; i < adev->vce.num_rings; i++)
 220		amdgpu_ring_fini(&adev->vce.ring[i]);
 221
 222	release_firmware(adev->vce.fw);
 223	mutex_destroy(&adev->vce.idle_mutex);
 224
 225	return 0;
 226}
 227
 228/**
 229 * amdgpu_vce_entity_init - init entity
 230 *
 231 * @adev: amdgpu_device pointer
 232 *
 233 */
 234int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 235{
 236	struct amdgpu_ring *ring;
 237	struct drm_sched_rq *rq;
 238	int r;
 239
 240	ring = &adev->vce.ring[0];
 241	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 242	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
 243	if (r != 0) {
 244		DRM_ERROR("Failed setting up VCE run queue.\n");
 245		return r;
 246	}
 247
 248	return 0;
 249}
 250
 251/**
 252 * amdgpu_vce_suspend - unpin VCE fw memory
 253 *
 254 * @adev: amdgpu_device pointer
 255 *
 256 */
 257int amdgpu_vce_suspend(struct amdgpu_device *adev)
 258{
 259	int i;
 260
 261	cancel_delayed_work_sync(&adev->vce.idle_work);
 262
 263	if (adev->vce.vcpu_bo == NULL)
 264		return 0;
 265
 266	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
 267		if (atomic_read(&adev->vce.handles[i]))
 268			break;
 269
 270	if (i == AMDGPU_MAX_VCE_HANDLES)
 271		return 0;
 272
 273	/* TODO: suspending running encoding sessions isn't supported */
 274	return -EINVAL;
 275}
 276
 277/**
 278 * amdgpu_vce_resume - pin VCE fw memory
 279 *
 280 * @adev: amdgpu_device pointer
 281 *
 282 */
 283int amdgpu_vce_resume(struct amdgpu_device *adev)
 284{
 285	void *cpu_addr;
 286	const struct common_firmware_header *hdr;
 287	unsigned offset;
 288	int r;
 289
 290	if (adev->vce.vcpu_bo == NULL)
 291		return -EINVAL;
 292
 293	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
 294	if (r) {
 295		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
 296		return r;
 297	}
 298
 299	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
 300	if (r) {
 301		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
 302		dev_err(adev->dev, "(%d) VCE map failed\n", r);
 303		return r;
 304	}
 305
 306	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 307	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 308	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
 309		    adev->vce.fw->size - offset);
 310
 311	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
 312
 313	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
 314
 315	return 0;
 316}
 317
 318/**
 319 * amdgpu_vce_idle_work_handler - power off VCE
 320 *
 321 * @work: pointer to work structure
 322 *
  323 * power off VCE when it's not used anymore
 324 */
 325static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 326{
 327	struct amdgpu_device *adev =
 328		container_of(work, struct amdgpu_device, vce.idle_work.work);
 329	unsigned i, count = 0;
 330
 331	for (i = 0; i < adev->vce.num_rings; i++)
 332		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
 333
 334	if (count == 0) {
 335		if (adev->pm.dpm_enabled) {
 336			amdgpu_dpm_enable_vce(adev, false);
 337		} else {
 338			amdgpu_asic_set_vce_clocks(adev, 0, 0);
 339			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 340							       AMD_PG_STATE_GATE);
 341			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 342							       AMD_CG_STATE_GATE);
 343		}
 344	} else {
 345		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 346	}
 347}
 348
 349/**
 350 * amdgpu_vce_ring_begin_use - power up VCE
 351 *
 352 * @ring: amdgpu ring
 353 *
  354 * Make sure VCE is powered up when we want to use it
 355 */
 356void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
 357{
 358	struct amdgpu_device *adev = ring->adev;
 359	bool set_clocks;
 360
 361	if (amdgpu_sriov_vf(adev))
 362		return;
 363
 364	mutex_lock(&adev->vce.idle_mutex);
 365	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
 366	if (set_clocks) {
 367		if (adev->pm.dpm_enabled) {
 368			amdgpu_dpm_enable_vce(adev, true);
 369		} else {
 370			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
 371			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 372							       AMD_CG_STATE_UNGATE);
 373			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 374							       AMD_PG_STATE_UNGATE);
 375
 376		}
 377	}
 378	mutex_unlock(&adev->vce.idle_mutex);
 379}
 380
 381/**
 382 * amdgpu_vce_ring_end_use - power VCE down
 383 *
 384 * @ring: amdgpu ring
 385 *
 386 * Schedule work to power VCE down again
 387 */
 388void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
 389{
 390	if (!amdgpu_sriov_vf(ring->adev))
 391		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
 392}
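/*
 * Illustrative pairing (not a function in this file): every VCE submission
 * is expected to be bracketed as
 *
 *	amdgpu_vce_ring_begin_use(ring);   cancels idle work, ungates clocks
 *	... emit and commit commands ...
 *	amdgpu_vce_ring_end_use(ring);     re-arms VCE_IDLE_TIMEOUT
 *
 * so the block is powered up on first use and powered down again once the
 * idle worker finds no outstanding fences.
 */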
 393
 394/**
 395 * amdgpu_vce_free_handles - free still open VCE handles
 396 *
 397 * @adev: amdgpu_device pointer
 398 * @filp: drm file pointer
 399 *
 400 * Close all VCE handles still open by this file pointer
 401 */
 402void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 403{
 404	struct amdgpu_ring *ring = &adev->vce.ring[0];
 405	int i, r;
 406	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 407		uint32_t handle = atomic_read(&adev->vce.handles[i]);
 408
 409		if (!handle || adev->vce.filp[i] != filp)
 410			continue;
 411
 412		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
 413		if (r)
 414			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
 415
 416		adev->vce.filp[i] = NULL;
 417		atomic_set(&adev->vce.handles[i], 0);
 418	}
 419}
 420
 421/**
 422 * amdgpu_vce_get_create_msg - generate a VCE create msg
 423 *
  424 * @ring: ring we should submit the msg to
  425 * @handle: VCE session handle to use
  426 * @bo: buffer object used for the feedback buffer address
  427 * @fence: optional fence to return
 428 *
 429 * Open up a stream for HW test
 430 */
 431int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 432			      struct amdgpu_bo *bo,
 433			      struct dma_fence **fence)
 434{
 435	const unsigned ib_size_dw = 1024;
 436	struct amdgpu_job *job;
 437	struct amdgpu_ib *ib;
 438	struct dma_fence *f = NULL;
 439	uint64_t addr;
 440	int i, r;
 441
 442	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 443	if (r)
 444		return r;
 445
 446	ib = &job->ibs[0];
 447
 448	addr = amdgpu_bo_gpu_offset(bo);
 449
  450	/* stitch together a VCE create msg */
 451	ib->length_dw = 0;
 452	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
 453	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
 454	ib->ptr[ib->length_dw++] = handle;
 455
 456	if ((ring->adev->vce.fw_version >> 24) >= 52)
 457		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
 458	else
 459		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
 460	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
 461	ib->ptr[ib->length_dw++] = 0x00000000;
 462	ib->ptr[ib->length_dw++] = 0x00000042;
 463	ib->ptr[ib->length_dw++] = 0x0000000a;
 464	ib->ptr[ib->length_dw++] = 0x00000001;
 465	ib->ptr[ib->length_dw++] = 0x00000080;
 466	ib->ptr[ib->length_dw++] = 0x00000060;
 467	ib->ptr[ib->length_dw++] = 0x00000100;
 468	ib->ptr[ib->length_dw++] = 0x00000100;
 469	ib->ptr[ib->length_dw++] = 0x0000000c;
 470	ib->ptr[ib->length_dw++] = 0x00000000;
 471	if ((ring->adev->vce.fw_version >> 24) >= 52) {
 472		ib->ptr[ib->length_dw++] = 0x00000000;
 473		ib->ptr[ib->length_dw++] = 0x00000000;
 474		ib->ptr[ib->length_dw++] = 0x00000000;
 475		ib->ptr[ib->length_dw++] = 0x00000000;
 476	}
 477
 478	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
 479	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
 480	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 481	ib->ptr[ib->length_dw++] = addr;
 482	ib->ptr[ib->length_dw++] = 0x00000001;
 483
 484	for (i = ib->length_dw; i < ib_size_dw; ++i)
 485		ib->ptr[i] = 0x0;
 486
 487	r = amdgpu_job_submit_direct(job, ring, &f);
 488	if (r)
 489		goto err;
 490
 491	if (fence)
 492		*fence = dma_fence_get(f);
 493	dma_fence_put(f);
 494	return 0;
 495
 496err:
 497	amdgpu_job_free(job);
 498	return r;
 499}
 500
 501/**
 502 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 503 *
  504 * @ring: ring we should submit the msg to
  505 * @handle: VCE session handle to use
  506 * @direct: submit directly to the ring instead of through the VCE entity
  507 * @fence: optional fence to return
 508 *
 509 * Close up a stream for HW test or if userspace failed to do so
 510 */
 511int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 512			       bool direct, struct dma_fence **fence)
 513{
 514	const unsigned ib_size_dw = 1024;
 515	struct amdgpu_job *job;
 516	struct amdgpu_ib *ib;
 517	struct dma_fence *f = NULL;
 518	int i, r;
 519
 520	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
 521	if (r)
 522		return r;
 523
 524	ib = &job->ibs[0];
 525
  526	/* stitch together a VCE destroy msg */
 527	ib->length_dw = 0;
 528	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
 529	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
 530	ib->ptr[ib->length_dw++] = handle;
 531
 532	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
 533	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
  534		ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
 535	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
 536	ib->ptr[ib->length_dw++] = 0x00000000;
 537	ib->ptr[ib->length_dw++] = 0x00000000;
 538	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
 539	ib->ptr[ib->length_dw++] = 0x00000000;
 540
 541	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
 542	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
 543
 544	for (i = ib->length_dw; i < ib_size_dw; ++i)
 545		ib->ptr[i] = 0x0;
 546
 547	if (direct)
 548		r = amdgpu_job_submit_direct(job, ring, &f);
 549	else
 550		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 551				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 552	if (r)
 553		goto err;
 554
 555	if (fence)
 556		*fence = dma_fence_get(f);
 557	dma_fence_put(f);
 558	return 0;
 559
 560err:
 561	amdgpu_job_free(job);
 562	return r;
 563}
 564
 565/**
  566 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 567 *
 568 * @p: parser context
 569 * @lo: address of lower dword
 570 * @hi: address of higher dword
 571 * @size: minimum size
 572 * @index: bs/fb index
 573 *
  574 * Make sure that no BO crosses a 4GB boundary.
 575 */
 576static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 577				  int lo, int hi, unsigned size, int32_t index)
 578{
 579	int64_t offset = ((uint64_t)size) * ((int64_t)index);
 580	struct ttm_operation_ctx ctx = { false, false };
 581	struct amdgpu_bo_va_mapping *mapping;
 582	unsigned i, fpfn, lpfn;
 583	struct amdgpu_bo *bo;
 584	uint64_t addr;
 585	int r;
 586
 587	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
 588	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
 589	if (index >= 0) {
 590		addr += offset;
 591		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
 592		lpfn = 0x100000000ULL >> PAGE_SHIFT;
 593	} else {
 594		fpfn = 0;
 595		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
 596	}
 597
 598	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
 599	if (r) {
 600		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
 601			  addr, lo, hi, size, index);
 602		return r;
 603	}
 604
 605	for (i = 0; i < bo->placement.num_placement; ++i) {
 606		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
 607		bo->placements[i].lpfn = bo->placements[i].lpfn ?
 608			min(bo->placements[i].lpfn, lpfn) : lpfn;
 609	}
 610	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 611}
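/*
 * Note: with 4 KiB pages the lpfn clamp above is 0x100000000 >> PAGE_SHIFT
 * = 0x100000 pages, i.e. the allowed placements are restricted so that the
 * range addressed through lo/hi (plus size * index when an index is given)
 * stays within a 4 GiB window and cannot straddle that boundary.
 */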
 612
 613
 614/**
 615 * amdgpu_vce_cs_reloc - command submission relocation
 616 *
 617 * @p: parser context
 618 * @lo: address of lower dword
 619 * @hi: address of higher dword
 620 * @size: minimum size
 621 *
 622 * Patch relocation inside command stream with real buffer address
 623 */
 624static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 625			       int lo, int hi, unsigned size, uint32_t index)
 626{
 627	struct amdgpu_bo_va_mapping *mapping;
 628	struct amdgpu_bo *bo;
 629	uint64_t addr;
 630	int r;
 631
 632	if (index == 0xffffffff)
 633		index = 0;
 634
 635	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
 636	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
 637	addr += ((uint64_t)size) * ((uint64_t)index);
 638
 639	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
 640	if (r) {
 641		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
 642			  addr, lo, hi, size, index);
 643		return r;
 644	}
 645
 646	if ((addr + (uint64_t)size) >
 647	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
  648		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
 649			  addr, lo, hi);
 650		return -EINVAL;
 651	}
 652
 653	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
 654	addr += amdgpu_bo_gpu_offset(bo);
 655	addr -= ((uint64_t)size) * ((uint64_t)index);
 656
 657	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
 658	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
 659
 660	return 0;
 661}
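/*
 * Illustrative example (hypothetical numbers, 4 KiB GPU pages assumed): if
 * the IB holds VM address 0x100800, the mapping found for it starts at GPU
 * page 0x100 and the BO currently sits at GPU offset 0xf4000000, then the
 * patched address is 0x100800 - 0x100 * 4096 + 0xf4000000 = 0xf4000800,
 * written back into the lo/hi dwords.  A non-zero index is first added and
 * finally subtracted again, so the patched value still points at element 0.
 */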
 662
 663/**
 664 * amdgpu_vce_validate_handle - validate stream handle
 665 *
 666 * @p: parser context
 667 * @handle: handle to validate
 668 * @allocated: allocated a new handle?
 669 *
  670 * Validates the handle and returns the found session index or -EINVAL
  671 * if we don't have another free session index.
 672 */
 673static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
 674				      uint32_t handle, uint32_t *allocated)
 675{
 676	unsigned i;
 677
 678	/* validate the handle */
 679	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 680		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
 681			if (p->adev->vce.filp[i] != p->filp) {
 682				DRM_ERROR("VCE handle collision detected!\n");
 683				return -EINVAL;
 684			}
 685			return i;
 686		}
 687	}
 688
 689	/* handle not found try to alloc a new one */
 690	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 691		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
 692			p->adev->vce.filp[i] = p->filp;
 693			p->adev->vce.img_size[i] = 0;
 694			*allocated |= 1 << i;
 695			return i;
 696		}
 697	}
 698
 699	DRM_ERROR("No more free VCE handles!\n");
 700	return -EINVAL;
 701}
 702
 703/**
  704 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 705 *
 706 * @p: parser context
 707 *
 708 */
 709int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 710{
 711	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 712	unsigned fb_idx = 0, bs_idx = 0;
 713	int session_idx = -1;
 714	uint32_t destroyed = 0;
 715	uint32_t created = 0;
 716	uint32_t allocated = 0;
 717	uint32_t tmp, handle = 0;
 718	uint32_t *size = &tmp;
 719	unsigned idx;
 720	int i, r = 0;
 721
 722	p->job->vm = NULL;
 723	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 724
 725	for (idx = 0; idx < ib->length_dw;) {
 726		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
 727		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
 728
 729		if ((len < 8) || (len & 3)) {
 730			DRM_ERROR("invalid VCE command length (%d)!\n", len);
 731			r = -EINVAL;
 732			goto out;
 733		}
 734
 735		switch (cmd) {
 736		case 0x00000002: /* task info */
 737			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
 738			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
 739			break;
 740
 741		case 0x03000001: /* encode */
 742			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
 743						   idx + 9, 0, 0);
 744			if (r)
 745				goto out;
 746
 747			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
 748						   idx + 11, 0, 0);
 749			if (r)
 750				goto out;
 751			break;
 752
 753		case 0x05000001: /* context buffer */
 754			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
 755						   idx + 2, 0, 0);
 756			if (r)
 757				goto out;
 758			break;
 759
 760		case 0x05000004: /* video bitstream buffer */
 761			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
 762			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
 763						   tmp, bs_idx);
 764			if (r)
 765				goto out;
 766			break;
 767
 768		case 0x05000005: /* feedback buffer */
 769			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
 770						   4096, fb_idx);
 771			if (r)
 772				goto out;
 773			break;
 774
 775		case 0x0500000d: /* MV buffer */
 776			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
 777							idx + 2, 0, 0);
 778			if (r)
 779				goto out;
 780
 781			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
 782							idx + 7, 0, 0);
 783			if (r)
 784				goto out;
 785			break;
 786		}
 787
 788		idx += len / 4;
 789	}
 790
 791	for (idx = 0; idx < ib->length_dw;) {
 792		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
 793		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
 794
 795		switch (cmd) {
 796		case 0x00000001: /* session */
 797			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
 798			session_idx = amdgpu_vce_validate_handle(p, handle,
 799								 &allocated);
 800			if (session_idx < 0) {
 801				r = session_idx;
 802				goto out;
 803			}
 804			size = &p->adev->vce.img_size[session_idx];
 805			break;
 806
 807		case 0x00000002: /* task info */
 808			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
 809			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
 810			break;
 811
 812		case 0x01000001: /* create */
 813			created |= 1 << session_idx;
 814			if (destroyed & (1 << session_idx)) {
 815				destroyed &= ~(1 << session_idx);
 816				allocated |= 1 << session_idx;
 817
 818			} else if (!(allocated & (1 << session_idx))) {
 819				DRM_ERROR("Handle already in use!\n");
 820				r = -EINVAL;
 821				goto out;
 822			}
 823
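			/*
			 * Assuming the two dwords read below are the session
			 * width and height, a 1920x1088 encode would give
			 * 1920 * 1088 * 8 * 3 / 2 = 25067520 bytes (~24 MiB),
			 * later used as the minimum size for the encode and
			 * context buffer checks.
			 */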
 824			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
 825				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
 826				8 * 3 / 2;
 827			break;
 828
 829		case 0x04000001: /* config extension */
 830		case 0x04000002: /* pic control */
 831		case 0x04000005: /* rate control */
 832		case 0x04000007: /* motion estimation */
 833		case 0x04000008: /* rdo */
 834		case 0x04000009: /* vui */
 835		case 0x05000002: /* auxiliary buffer */
 836		case 0x05000009: /* clock table */
 837			break;
 838
 839		case 0x0500000c: /* hw config */
 840			switch (p->adev->asic_type) {
 841#ifdef CONFIG_DRM_AMDGPU_CIK
 842			case CHIP_KAVERI:
 843			case CHIP_MULLINS:
 844#endif
 845			case CHIP_CARRIZO:
 846				break;
 847			default:
 848				r = -EINVAL;
 849				goto out;
 850			}
 851			break;
 852
 853		case 0x03000001: /* encode */
 854			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
 855						*size, 0);
 856			if (r)
 857				goto out;
 858
 859			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
 860						*size / 3, 0);
 861			if (r)
 862				goto out;
 863			break;
 864
 865		case 0x02000001: /* destroy */
 866			destroyed |= 1 << session_idx;
 867			break;
 868
 869		case 0x05000001: /* context buffer */
 870			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 871						*size * 2, 0);
 872			if (r)
 873				goto out;
 874			break;
 875
 876		case 0x05000004: /* video bitstream buffer */
 877			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
 878			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 879						tmp, bs_idx);
 880			if (r)
 881				goto out;
 882			break;
 883
 884		case 0x05000005: /* feedback buffer */
 885			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
 886						4096, fb_idx);
 887			if (r)
 888				goto out;
 889			break;
 890
 891		case 0x0500000d: /* MV buffer */
 892			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
 893							idx + 2, *size, 0);
 894			if (r)
 895				goto out;
 896
 897			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
 898							idx + 7, *size / 12, 0);
 899			if (r)
 900				goto out;
 901			break;
 902
 903		default:
 904			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
 905			r = -EINVAL;
 906			goto out;
 907		}
 908
 909		if (session_idx == -1) {
 910			DRM_ERROR("no session command at start of IB\n");
 911			r = -EINVAL;
 912			goto out;
 913		}
 914
 915		idx += len / 4;
 916	}
 917
 918	if (allocated & ~created) {
 919		DRM_ERROR("New session without create command!\n");
 920		r = -ENOENT;
 921	}
 922
 923out:
 924	if (!r) {
 925		/* No error, free all destroyed handle slots */
 926		tmp = destroyed;
 927	} else {
 928		/* Error during parsing, free all allocated handle slots */
 929		tmp = allocated;
 930	}
 931
 932	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
 933		if (tmp & (1 << i))
 934			atomic_set(&p->adev->vce.handles[i], 0);
 935
 936	return r;
 937}
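/*
 * Illustrative walk-through: created/destroyed/allocated are per-session
 * bitmasks.  An IB that opens a new session landing in slot 2 sets bit 2 in
 * both allocated and created; a destroy command for that session sets bit 2
 * in destroyed.  On success the cleanup above clears the handles flagged in
 * destroyed; on error it clears those flagged in allocated, so a rejected
 * command stream cannot leak session slots.
 */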
 938
 939/**
  940 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 941 *
 942 * @p: parser context
 943 *
 944 */
 945int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 946{
 947	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 948	int session_idx = -1;
 949	uint32_t destroyed = 0;
 950	uint32_t created = 0;
 951	uint32_t allocated = 0;
 952	uint32_t tmp, handle = 0;
 953	int i, r = 0, idx = 0;
 954
 955	while (idx < ib->length_dw) {
 956		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
 957		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
 958
 959		if ((len < 8) || (len & 3)) {
 960			DRM_ERROR("invalid VCE command length (%d)!\n", len);
 961			r = -EINVAL;
 962			goto out;
 963		}
 964
 965		switch (cmd) {
 966		case 0x00000001: /* session */
 967			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
 968			session_idx = amdgpu_vce_validate_handle(p, handle,
 969								 &allocated);
 970			if (session_idx < 0) {
 971				r = session_idx;
 972				goto out;
 973			}
 974			break;
 975
 976		case 0x01000001: /* create */
 977			created |= 1 << session_idx;
 978			if (destroyed & (1 << session_idx)) {
 979				destroyed &= ~(1 << session_idx);
 980				allocated |= 1 << session_idx;
 981
 982			} else if (!(allocated & (1 << session_idx))) {
 983				DRM_ERROR("Handle already in use!\n");
 984				r = -EINVAL;
 985				goto out;
 986			}
 987
 988			break;
 989
 990		case 0x02000001: /* destroy */
 991			destroyed |= 1 << session_idx;
 992			break;
 993
 994		default:
 995			break;
 996		}
 997
 998		if (session_idx == -1) {
 999			DRM_ERROR("no session command at start of IB\n");
1000			r = -EINVAL;
1001			goto out;
1002		}
1003
1004		idx += len / 4;
1005	}
1006
1007	if (allocated & ~created) {
1008		DRM_ERROR("New session without create command!\n");
1009		r = -ENOENT;
1010	}
1011
1012out:
1013	if (!r) {
1014		/* No error, free all destroyed handle slots */
1015		tmp = destroyed;
1016		amdgpu_ib_free(p->adev, ib, NULL);
1017	} else {
1018		/* Error during parsing, free all allocated handle slots */
1019		tmp = allocated;
1020	}
1021
1022	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
1023		if (tmp & (1 << i))
1024			atomic_set(&p->adev->vce.handles[i], 0);
1025
1026	return r;
1027}
1028
1029/**
1030 * amdgpu_vce_ring_emit_ib - execute indirect buffer
1031 *
1032 * @ring: engine to use
1033 * @ib: the IB to execute
1034 *
1035 */
1036void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
1037				struct amdgpu_job *job,
1038				struct amdgpu_ib *ib,
1039				uint32_t flags)
1040{
1041	amdgpu_ring_write(ring, VCE_CMD_IB);
1042	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1043	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1044	amdgpu_ring_write(ring, ib->length_dw);
1045}
1046
1047/**
1048 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
1049 *
1050 * @ring: engine to use
1051 * @fence: the fence
1052 *
1053 */
1054void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1055				unsigned flags)
1056{
1057	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1058
1059	amdgpu_ring_write(ring, VCE_CMD_FENCE);
1060	amdgpu_ring_write(ring, addr);
1061	amdgpu_ring_write(ring, upper_32_bits(addr));
1062	amdgpu_ring_write(ring, seq);
1063	amdgpu_ring_write(ring, VCE_CMD_TRAP);
1064	amdgpu_ring_write(ring, VCE_CMD_END);
1065}
1066
1067/**
1068 * amdgpu_vce_ring_test_ring - test if VCE ring is working
1069 *
1070 * @ring: the engine to test on
1071 *
1072 */
1073int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
1074{
1075	struct amdgpu_device *adev = ring->adev;
1076	uint32_t rptr;
1077	unsigned i;
1078	int r, timeout = adev->usec_timeout;
1079
 1080	/* skip ring test for sriov */
1081	if (amdgpu_sriov_vf(adev))
1082		return 0;
1083
1084	r = amdgpu_ring_alloc(ring, 16);
1085	if (r)
1086		return r;
1087
1088	rptr = amdgpu_ring_get_rptr(ring);
1089
1090	amdgpu_ring_write(ring, VCE_CMD_END);
1091	amdgpu_ring_commit(ring);
1092
1093	for (i = 0; i < timeout; i++) {
1094		if (amdgpu_ring_get_rptr(ring) != rptr)
1095			break;
1096		udelay(1);
1097	}
1098
1099	if (i >= timeout)
1100		r = -ETIMEDOUT;
1101
1102	return r;
1103}
1104
1105/**
1106 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
1107 *
1108 * @ring: the engine to test on
1109 *
1110 */
1111int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1112{
1113	struct dma_fence *fence = NULL;
1114	struct amdgpu_bo *bo = NULL;
1115	long r;
1116
1117	/* skip vce ring1/2 ib test for now, since it's not reliable */
1118	if (ring != &ring->adev->vce.ring[0])
1119		return 0;
1120
1121	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
1122				      AMDGPU_GEM_DOMAIN_VRAM,
1123				      &bo, NULL, NULL);
1124	if (r)
1125		return r;
1126
1127	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
1128	if (r)
1129		goto error;
1130
1131	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
1132	if (r)
1133		goto error;
1134
1135	r = dma_fence_wait_timeout(fence, false, timeout);
1136	if (r == 0)
1137		r = -ETIMEDOUT;
1138	else if (r > 0)
1139		r = 0;
1140
1141error:
1142	dma_fence_put(fence);
1143	amdgpu_bo_unreserve(bo);
1144	amdgpu_bo_unref(&bo);
1145	return r;
1146}
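/*
 * Illustrative sketch (not part of the kernel source): a minimal picture of
 * how a VCE IP-block implementation is expected to use the helpers in this
 * file during hardware bring-up.  The wrapper name and error handling are
 * hypothetical; only the amdgpu_vce_* calls and fields come from the code
 * above.
 */
static int example_vce_hw_init(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	/* copy the firmware image into the pinned VCPU BO */
	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	/* smoke-test each exposed ring before accepting user submissions */
	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_vce_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	return 0;
}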
v4.6
  1/*
  2 * Copyright 2013 Advanced Micro Devices, Inc.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 * Authors: Christian König <christian.koenig@amd.com>
 26 */
 27
 28#include <linux/firmware.h>
 29#include <linux/module.h>
 30#include <drm/drmP.h>
 31#include <drm/drm.h>
 32
 33#include "amdgpu.h"
 34#include "amdgpu_pm.h"
 35#include "amdgpu_vce.h"
 36#include "cikd.h"
 37
 38/* 1 second timeout */
 39#define VCE_IDLE_TIMEOUT_MS	1000
 40
 41/* Firmware Names */
 42#ifdef CONFIG_DRM_AMDGPU_CIK
 43#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
 44#define FIRMWARE_KABINI 	"radeon/kabini_vce.bin"
 45#define FIRMWARE_KAVERI 	"radeon/kaveri_vce.bin"
 46#define FIRMWARE_HAWAII 	"radeon/hawaii_vce.bin"
 47#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
 48#endif
 49#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 50#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
 51#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 52#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
 53
 54#ifdef CONFIG_DRM_AMDGPU_CIK
 55MODULE_FIRMWARE(FIRMWARE_BONAIRE);
 56MODULE_FIRMWARE(FIRMWARE_KABINI);
 57MODULE_FIRMWARE(FIRMWARE_KAVERI);
 58MODULE_FIRMWARE(FIRMWARE_HAWAII);
 59MODULE_FIRMWARE(FIRMWARE_MULLINS);
 60#endif
 61MODULE_FIRMWARE(FIRMWARE_TONGA);
 62MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 63MODULE_FIRMWARE(FIRMWARE_FIJI);
 64MODULE_FIRMWARE(FIRMWARE_STONEY);
 65
 66static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 67
 68/**
  69 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 70 *
 71 * @adev: amdgpu_device pointer
 72 *
 73 * First step to get VCE online, allocate memory and load the firmware
 74 */
 75int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 76{
 77	struct amdgpu_ring *ring;
 78	struct amd_sched_rq *rq;
 79	const char *fw_name;
 80	const struct common_firmware_header *hdr;
 81	unsigned ucode_version, version_major, version_minor, binary_id;
 82	int i, r;
 83
 84	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
 85
 86	switch (adev->asic_type) {
 87#ifdef CONFIG_DRM_AMDGPU_CIK
 88	case CHIP_BONAIRE:
 89		fw_name = FIRMWARE_BONAIRE;
 90		break;
 91	case CHIP_KAVERI:
 92		fw_name = FIRMWARE_KAVERI;
 93		break;
 94	case CHIP_KABINI:
 95		fw_name = FIRMWARE_KABINI;
 96		break;
 97	case CHIP_HAWAII:
 98		fw_name = FIRMWARE_HAWAII;
 99		break;
100	case CHIP_MULLINS:
101		fw_name = FIRMWARE_MULLINS;
102		break;
103#endif
104	case CHIP_TONGA:
105		fw_name = FIRMWARE_TONGA;
106		break;
107	case CHIP_CARRIZO:
108		fw_name = FIRMWARE_CARRIZO;
109		break;
110	case CHIP_FIJI:
111		fw_name = FIRMWARE_FIJI;
112		break;
113	case CHIP_STONEY:
114		fw_name = FIRMWARE_STONEY;
115		break;
116
117	default:
118		return -EINVAL;
119	}
120
121	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
122	if (r) {
123		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
124			fw_name);
125		return r;
126	}
127
128	r = amdgpu_ucode_validate(adev->vce.fw);
129	if (r) {
130		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
131			fw_name);
132		release_firmware(adev->vce.fw);
133		adev->vce.fw = NULL;
134		return r;
135	}
136
137	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
138
139	ucode_version = le32_to_cpu(hdr->ucode_version);
140	version_major = (ucode_version >> 20) & 0xfff;
141	version_minor = (ucode_version >> 8) & 0xfff;
142	binary_id = ucode_version & 0xff;
143	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
144		version_major, version_minor, binary_id);
145	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
146				(binary_id << 8));
147
148	/* allocate firmware, stack and heap BO */
149
150	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
151			     AMDGPU_GEM_DOMAIN_VRAM,
152			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
153			     NULL, NULL, &adev->vce.vcpu_bo);
154	if (r) {
155		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
156		return r;
157	}
158
159	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
160	if (r) {
161		amdgpu_bo_unref(&adev->vce.vcpu_bo);
162		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
163		return r;
164	}
165
166	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
167			  &adev->vce.gpu_addr);
168	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
169	if (r) {
170		amdgpu_bo_unref(&adev->vce.vcpu_bo);
171		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
172		return r;
173	}
174
175
176	ring = &adev->vce.ring[0];
177	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
178	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
179				  rq, amdgpu_sched_jobs);
180	if (r != 0) {
181		DRM_ERROR("Failed setting up VCE run queue.\n");
182		return r;
183	}
184
185	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
186		atomic_set(&adev->vce.handles[i], 0);
187		adev->vce.filp[i] = NULL;
188	}
189
190	return 0;
191}
192
193/**
 194 * amdgpu_vce_sw_fini - free memory
195 *
196 * @adev: amdgpu_device pointer
197 *
198 * Last step on VCE teardown, free firmware memory
199 */
200int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
201{
202	if (adev->vce.vcpu_bo == NULL)
203		return 0;
204
205	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
206
207	amdgpu_bo_unref(&adev->vce.vcpu_bo);
208
209	amdgpu_ring_fini(&adev->vce.ring[0]);
210	amdgpu_ring_fini(&adev->vce.ring[1]);
211
212	release_firmware(adev->vce.fw);
213
214	return 0;
215}
216
217/**
218 * amdgpu_vce_suspend - unpin VCE fw memory
219 *
220 * @adev: amdgpu_device pointer
221 *
222 */
223int amdgpu_vce_suspend(struct amdgpu_device *adev)
224{
225	int i;
226
227	if (adev->vce.vcpu_bo == NULL)
228		return 0;
229
230	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
231		if (atomic_read(&adev->vce.handles[i]))
232			break;
233
234	if (i == AMDGPU_MAX_VCE_HANDLES)
235		return 0;
236
237	cancel_delayed_work_sync(&adev->vce.idle_work);
238	/* TODO: suspending running encoding sessions isn't supported */
239	return -EINVAL;
240}
241
242/**
243 * amdgpu_vce_resume - pin VCE fw memory
244 *
245 * @adev: amdgpu_device pointer
246 *
247 */
248int amdgpu_vce_resume(struct amdgpu_device *adev)
249{
250	void *cpu_addr;
251	const struct common_firmware_header *hdr;
252	unsigned offset;
253	int r;
254
255	if (adev->vce.vcpu_bo == NULL)
256		return -EINVAL;
257
258	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
259	if (r) {
260		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
261		return r;
262	}
263
264	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
265	if (r) {
266		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
267		dev_err(adev->dev, "(%d) VCE map failed\n", r);
268		return r;
269	}
270
271	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
272	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
273	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
274		(adev->vce.fw->size) - offset);
275
276	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
277
278	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
279
280	return 0;
281}
282
283/**
284 * amdgpu_vce_idle_work_handler - power off VCE
285 *
286 * @work: pointer to work structure
287 *
 288 * power off VCE when it's not used anymore
289 */
290static void amdgpu_vce_idle_work_handler(struct work_struct *work)
291{
292	struct amdgpu_device *adev =
293		container_of(work, struct amdgpu_device, vce.idle_work.work);
294
295	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
296	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
297		if (adev->pm.dpm_enabled) {
298			amdgpu_dpm_enable_vce(adev, false);
299		} else {
300			amdgpu_asic_set_vce_clocks(adev, 0, 0);
301		}
302	} else {
303		schedule_delayed_work(&adev->vce.idle_work,
304				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
305	}
306}
307
308/**
309 * amdgpu_vce_note_usage - power up VCE
310 *
311 * @adev: amdgpu_device pointer
312 *
 313 * Make sure VCE is powered up when we want to use it
314 */
315static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
316{
317	bool streams_changed = false;
318	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
319	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
320					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
321
322	if (adev->pm.dpm_enabled) {
323		/* XXX figure out if the streams changed */
324		streams_changed = false;
325	}
326
327	if (set_clocks || streams_changed) {
328		if (adev->pm.dpm_enabled) {
329			amdgpu_dpm_enable_vce(adev, true);
330		} else {
331			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
332		}
333	}
334}
335
336/**
337 * amdgpu_vce_free_handles - free still open VCE handles
338 *
339 * @adev: amdgpu_device pointer
340 * @filp: drm file pointer
341 *
342 * Close all VCE handles still open by this file pointer
343 */
344void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
345{
346	struct amdgpu_ring *ring = &adev->vce.ring[0];
347	int i, r;
348	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
349		uint32_t handle = atomic_read(&adev->vce.handles[i]);
350		if (!handle || adev->vce.filp[i] != filp)
351			continue;
352
353		amdgpu_vce_note_usage(adev);
354
355		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
356		if (r)
357			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
358
359		adev->vce.filp[i] = NULL;
360		atomic_set(&adev->vce.handles[i], 0);
361	}
362}
363
364/**
365 * amdgpu_vce_get_create_msg - generate a VCE create msg
366 *
367 * @adev: amdgpu_device pointer
368 * @ring: ring we should submit the msg to
369 * @handle: VCE session handle to use
370 * @fence: optional fence to return
371 *
372 * Open up a stream for HW test
373 */
374int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
375			      struct fence **fence)
376{
377	const unsigned ib_size_dw = 1024;
378	struct amdgpu_job *job;
379	struct amdgpu_ib *ib;
380	struct fence *f = NULL;
381	uint64_t dummy;
382	int i, r;
383
384	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
385	if (r)
386		return r;
387
388	ib = &job->ibs[0];
389
390	dummy = ib->gpu_addr + 1024;
391
 392	/* stitch together a VCE create msg */
393	ib->length_dw = 0;
394	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
395	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
396	ib->ptr[ib->length_dw++] = handle;
397
398	if ((ring->adev->vce.fw_version >> 24) >= 52)
399		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
400	else
401		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
402	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
403	ib->ptr[ib->length_dw++] = 0x00000000;
404	ib->ptr[ib->length_dw++] = 0x00000042;
405	ib->ptr[ib->length_dw++] = 0x0000000a;
406	ib->ptr[ib->length_dw++] = 0x00000001;
407	ib->ptr[ib->length_dw++] = 0x00000080;
408	ib->ptr[ib->length_dw++] = 0x00000060;
409	ib->ptr[ib->length_dw++] = 0x00000100;
410	ib->ptr[ib->length_dw++] = 0x00000100;
411	ib->ptr[ib->length_dw++] = 0x0000000c;
412	ib->ptr[ib->length_dw++] = 0x00000000;
413	if ((ring->adev->vce.fw_version >> 24) >= 52) {
414		ib->ptr[ib->length_dw++] = 0x00000000;
415		ib->ptr[ib->length_dw++] = 0x00000000;
416		ib->ptr[ib->length_dw++] = 0x00000000;
417		ib->ptr[ib->length_dw++] = 0x00000000;
418	}
419
420	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
421	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
422	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
423	ib->ptr[ib->length_dw++] = dummy;
424	ib->ptr[ib->length_dw++] = 0x00000001;
425
426	for (i = ib->length_dw; i < ib_size_dw; ++i)
427		ib->ptr[i] = 0x0;
428
429	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
430	job->fence = f;
431	if (r)
432		goto err;
433
434	amdgpu_job_free(job);
435	if (fence)
436		*fence = fence_get(f);
437	fence_put(f);
438	return 0;
439
440err:
441	amdgpu_job_free(job);
442	return r;
443}
444
445/**
446 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
447 *
448 * @adev: amdgpu_device pointer
449 * @ring: ring we should submit the msg to
450 * @handle: VCE session handle to use
451 * @fence: optional fence to return
452 *
453 * Close up a stream for HW test or if userspace failed to do so
454 */
455int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
456			       bool direct, struct fence **fence)
457{
458	const unsigned ib_size_dw = 1024;
459	struct amdgpu_job *job;
460	struct amdgpu_ib *ib;
461	struct fence *f = NULL;
462	uint64_t dummy;
463	int i, r;
464
465	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
466	if (r)
467		return r;
468
469	ib = &job->ibs[0];
470	dummy = ib->gpu_addr + 1024;
471
 472	/* stitch together a VCE destroy msg */
473	ib->length_dw = 0;
474	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
475	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
476	ib->ptr[ib->length_dw++] = handle;
477
478	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
479	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
480	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
481	ib->ptr[ib->length_dw++] = dummy;
482	ib->ptr[ib->length_dw++] = 0x00000001;
483
484	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
485	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
486
487	for (i = ib->length_dw; i < ib_size_dw; ++i)
488		ib->ptr[i] = 0x0;
489
490	if (direct) {
491		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
492		job->fence = f;
493		if (r)
494			goto err;
495
496		amdgpu_job_free(job);
497	} else {
498		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
499				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
500		if (r)
501			goto err;
502	}
503
504	if (fence)
505		*fence = fence_get(f);
506	fence_put(f);
507	return 0;
508
509err:
510	amdgpu_job_free(job);
511	return r;
512}
513
514/**
515 * amdgpu_vce_cs_reloc - command submission relocation
516 *
517 * @p: parser context
518 * @lo: address of lower dword
519 * @hi: address of higher dword
520 * @size: minimum size
521 *
522 * Patch relocation inside command stream with real buffer address
523 */
524static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
525			       int lo, int hi, unsigned size, uint32_t index)
526{
527	struct amdgpu_bo_va_mapping *mapping;
528	struct amdgpu_bo *bo;
529	uint64_t addr;
530
531	if (index == 0xffffffff)
532		index = 0;
533
534	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
535	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
536	addr += ((uint64_t)size) * ((uint64_t)index);
537
538	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
539	if (mapping == NULL) {
540		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
541			  addr, lo, hi, size, index);
542		return -EINVAL;
543	}
544
545	if ((addr + (uint64_t)size) >
546	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 547		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
548			  addr, lo, hi);
549		return -EINVAL;
550	}
551
552	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
553	addr += amdgpu_bo_gpu_offset(bo);
554	addr -= ((uint64_t)size) * ((uint64_t)index);
555
556	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
557	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
558
559	return 0;
560}
561
562/**
563 * amdgpu_vce_validate_handle - validate stream handle
564 *
565 * @p: parser context
566 * @handle: handle to validate
567 * @allocated: allocated a new handle?
568 *
 569 * Validates the handle and returns the found session index or -EINVAL
 570 * if we don't have another free session index.
571 */
572static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
573				      uint32_t handle, bool *allocated)
574{
575	unsigned i;
576
577	*allocated = false;
578
579	/* validate the handle */
580	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
581		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
582			if (p->adev->vce.filp[i] != p->filp) {
583				DRM_ERROR("VCE handle collision detected!\n");
584				return -EINVAL;
585			}
586			return i;
587		}
588	}
589
590	/* handle not found try to alloc a new one */
591	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
592		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
593			p->adev->vce.filp[i] = p->filp;
594			p->adev->vce.img_size[i] = 0;
595			*allocated = true;
596			return i;
597		}
598	}
599
600	DRM_ERROR("No more free VCE handles!\n");
601	return -EINVAL;
602}
603
604/**
 605 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
606 *
607 * @p: parser context
608 *
609 */
610int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
611{
612	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
613	unsigned fb_idx = 0, bs_idx = 0;
614	int session_idx = -1;
615	bool destroyed = false;
616	bool created = false;
617	bool allocated = false;
618	uint32_t tmp, handle = 0;
619	uint32_t *size = &tmp;
620	int i, r = 0, idx = 0;
621
622	amdgpu_vce_note_usage(p->adev);
623
624	while (idx < ib->length_dw) {
625		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
626		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
627
628		if ((len < 8) || (len & 3)) {
629			DRM_ERROR("invalid VCE command length (%d)!\n", len);
630			r = -EINVAL;
631			goto out;
632		}
633
634		if (destroyed) {
635			DRM_ERROR("No other command allowed after destroy!\n");
636			r = -EINVAL;
637			goto out;
638		}
639
640		switch (cmd) {
641		case 0x00000001: // session
642			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
643			session_idx = amdgpu_vce_validate_handle(p, handle,
644								 &allocated);
645			if (session_idx < 0)
646				return session_idx;
647			size = &p->adev->vce.img_size[session_idx];
648			break;
649
650		case 0x00000002: // task info
651			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
652			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
653			break;
654
655		case 0x01000001: // create
656			created = true;
657			if (!allocated) {
658				DRM_ERROR("Handle already in use!\n");
659				r = -EINVAL;
660				goto out;
661			}
662
663			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
664				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
665				8 * 3 / 2;
666			break;
667
668		case 0x04000001: // config extension
669		case 0x04000002: // pic control
670		case 0x04000005: // rate control
671		case 0x04000007: // motion estimation
672		case 0x04000008: // rdo
673		case 0x04000009: // vui
674		case 0x05000002: // auxiliary buffer
675			break;
676
677		case 0x03000001: // encode
678			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
679						*size, 0);
680			if (r)
681				goto out;
682
683			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
684						*size / 3, 0);
685			if (r)
686				goto out;
687			break;
688
689		case 0x02000001: // destroy
690			destroyed = true;
691			break;
692
693		case 0x05000001: // context buffer
694			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
695						*size * 2, 0);
696			if (r)
697				goto out;
698			break;
699
700		case 0x05000004: // video bitstream buffer
701			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
702			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
703						tmp, bs_idx);
704			if (r)
705				goto out;
706			break;
707
708		case 0x05000005: // feedback buffer
709			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
710						4096, fb_idx);
711			if (r)
712				goto out;
713			break;
714
715		default:
716			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
717			r = -EINVAL;
718			goto out;
719		}
720
721		if (session_idx == -1) {
722			DRM_ERROR("no session command at start of IB\n");
723			r = -EINVAL;
724			goto out;
725		}
726
727		idx += len / 4;
728	}
729
730	if (allocated && !created) {
731		DRM_ERROR("New session without create command!\n");
732		r = -ENOENT;
733	}
734
735out:
736	if ((!r && destroyed) || (r && allocated)) {
737		/*
738		 * IB contains a destroy msg or we have allocated an
739		 * handle and got an error, anyway free the handle
740		 */
741		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
742			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
743	}
744
745	return r;
746}
747
748/**
749 * amdgpu_vce_ring_emit_ib - execute indirect buffer
750 *
751 * @ring: engine to use
752 * @ib: the IB to execute
753 *
754 */
755void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
756{
757	amdgpu_ring_write(ring, VCE_CMD_IB);
758	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
759	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
760	amdgpu_ring_write(ring, ib->length_dw);
761}
762
763/**
764 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
765 *
766 * @ring: engine to use
767 * @fence: the fence
768 *
769 */
770void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
771				unsigned flags)
772{
773	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
774
775	amdgpu_ring_write(ring, VCE_CMD_FENCE);
776	amdgpu_ring_write(ring, addr);
777	amdgpu_ring_write(ring, upper_32_bits(addr));
778	amdgpu_ring_write(ring, seq);
779	amdgpu_ring_write(ring, VCE_CMD_TRAP);
780	amdgpu_ring_write(ring, VCE_CMD_END);
781}
782
783/**
784 * amdgpu_vce_ring_test_ring - test if VCE ring is working
785 *
786 * @ring: the engine to test on
787 *
788 */
789int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
790{
791	struct amdgpu_device *adev = ring->adev;
792	uint32_t rptr = amdgpu_ring_get_rptr(ring);
793	unsigned i;
794	int r;
795
796	r = amdgpu_ring_alloc(ring, 16);
797	if (r) {
798		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
799			  ring->idx, r);
800		return r;
801	}
802	amdgpu_ring_write(ring, VCE_CMD_END);
803	amdgpu_ring_commit(ring);
804
805	for (i = 0; i < adev->usec_timeout; i++) {
806		if (amdgpu_ring_get_rptr(ring) != rptr)
807			break;
808		DRM_UDELAY(1);
809	}
810
811	if (i < adev->usec_timeout) {
812		DRM_INFO("ring test on %d succeeded in %d usecs\n",
813			 ring->idx, i);
814	} else {
815		DRM_ERROR("amdgpu: ring %d test failed\n",
816			  ring->idx);
817		r = -ETIMEDOUT;
818	}
819
820	return r;
821}
822
823/**
824 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
825 *
826 * @ring: the engine to test on
827 *
828 */
829int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
830{
831	struct fence *fence = NULL;
832	int r;
833
834	/* skip vce ring1 ib test for now, since it's not reliable */
835	if (ring == &ring->adev->vce.ring[1])
836		return 0;
837
838	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
839	if (r) {
840		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
841		goto error;
842	}
843
844	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
845	if (r) {
846		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
847		goto error;
848	}
849
850	r = fence_wait(fence, false);
851	if (r) {
852		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
853	} else {
854		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
855	}
856error:
857	fence_put(fence);
858	return r;
859}