v4.17
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

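	/* The VCPU BO holds the page-aligned firmware image (plus an 8-byte
	 * pad) followed by the firmware stack, the heap, and room for up to
	 * 40 session contexts, matching the sizing below.
	 */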
	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  +  AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			/* might be used when with pg/cg
			amdgpu_dpm_enable_uvd(adev, false);
			*/
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

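/* begin_use() cancels the pending idle work before a submission reaches the
 * ring; end_use() re-arms the one-second idle timer afterwards.
 */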
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks && adev->pm.dpm_enabled) {
		/* might be used when with pg/cg
		amdgpu_dpm_enable_uvd(adev, true);
		*/
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

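	/* Seed the context-ID scratch register with a dummy value, submit a
	 * packet that overwrites it with 0xDEADBEEF, and poll until the new
	 * value reads back (or the timeout expires).
	 */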
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo, bool direct,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
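	/* Build a 16-dword IB: point DATA0/DATA1 at the message BO, issue the
	 * VCPU command, and pad the remainder with NOP register writes.
	 */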
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

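	/* Minimal decoder create message for the given session handle; the
	 * trailing 0x780/0x440 words appear to describe a 1920x1088 session.
	 */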
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

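	/* Minimal decoder destroy message for the same session handle. */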
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}
v5.14.15
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_ALDEBARAN:
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_SIENNA_CICHLID:
		fw_name = FIRMWARE_SIENNA_CICHLID;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVY_FLOUNDER:
		fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_VANGOGH:
		fw_name = FIRMWARE_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_BEIGE_GOBY:
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_YELLOW_CARP:
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero under the
	 * new naming convention. In the old naming convention this field is
	 * part of the version minor and DRM_DISABLED_FLAG. Since the latest
	 * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
	 * convention, this field is always zero there; these four bits
	 * therefore tell which naming convention is present.
	 */
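	/* e.g. a hypothetical ucode_version of 0x1110a003 decodes under the
	 * new scheme as ENC 1.10, DEC 1, VEP 1, revision 3.
	 */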
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

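		/* The shared firmware/driver area occupies the page-aligned
		 * tail of each instance's VCPU BO; derive its CPU and GPU
		 * addresses from the end of the BO.
		 */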
		adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int major;
	int minor;
	int revision;

	/* if cannot find IP data, then this VCN does not exist */
	if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0)
		return true;

	if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

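		/* drm_dev_enter() guards the MMIO copy against device
		 * hot-unplug; if the device is already gone the copy is
		 * skipped.
		 */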
		if (drm_dev_enter(&adev->ddev, &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(&adev->ddev, &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(&adev->ddev, &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

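		/* Under dynamic power gating, keep the firmware paused while
		 * encode fences or encode submissions are outstanding and
		 * unpause it otherwise.
		 */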
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
			struct dpg_pause_state new_state;

			if (fence[j] ||
				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	void *msg = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	msg = amdgpu_bo_kptr(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo **bo)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	*bo = NULL;
	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo **bo)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	*bo = NULL;
	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	const unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
				AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->length_dw = 0;

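	/* Software-ring submissions wrap the message BO address in an
	 * amdgpu_vcn_decode_buffer structure embedded in the IB rather than
	 * programming it through registers.
	 */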
	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, NULL);

	return r;
}