Linux v6.9.4: drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#include "amdgpu_jpeg.h"
 26#include "amdgpu_pm.h"
 27#include "soc15.h"
 28#include "soc15d.h"
 29#include "jpeg_v2_0.h"
 30
 31#include "vcn/vcn_3_0_0_offset.h"
 32#include "vcn/vcn_3_0_0_sh_mask.h"
 33#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
 34
 35#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f
 36
 37static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 38static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
 39static int jpeg_v3_0_set_powergating_state(void *handle,
 40				enum amd_powergating_state state);
 41
 42/**
 43 * jpeg_v3_0_early_init - set function pointers
 44 *
 45 * @handle: amdgpu_device pointer
 46 *
 47 * Set ring and irq function pointers
 48 */
 49static int jpeg_v3_0_early_init(void *handle)
 50{
 51	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 52
 53	u32 harvest;
 54
 55	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
 56	case IP_VERSION(3, 1, 1):
 57	case IP_VERSION(3, 1, 2):
 58		break;
 59	default:
 60		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
 61		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 62			return -ENOENT;
 63		break;
 64	}
 65
 66	adev->jpeg.num_jpeg_inst = 1;
 67	adev->jpeg.num_jpeg_rings = 1;
 68
 69	jpeg_v3_0_set_dec_ring_funcs(adev);
 70	jpeg_v3_0_set_irq_funcs(adev);
 71
 72	return 0;
 73}
 74
 75/**
 76 * jpeg_v3_0_sw_init - sw init for JPEG block
 77 *
 78 * @handle: amdgpu_device pointer
 79 *
 80 * Load firmware and sw initialization
 81 */
 82static int jpeg_v3_0_sw_init(void *handle)
 83{
 84	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 85	struct amdgpu_ring *ring;
 86	int r;
 87
 88	/* JPEG TRAP */
 89	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 90		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
 91	if (r)
 92		return r;
 93
 94	r = amdgpu_jpeg_sw_init(adev);
 95	if (r)
 96		return r;
 97
 98	r = amdgpu_jpeg_resume(adev);
 99	if (r)
100		return r;
101
102	ring = adev->jpeg.inst->ring_dec;
103	ring->use_doorbell = true;
104	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
105	ring->vm_hub = AMDGPU_MMHUB0(0);
106	sprintf(ring->name, "jpeg_dec");
107	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
108			     AMDGPU_RING_PRIO_DEFAULT, NULL);
109	if (r)
110		return r;
111
112	adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
113	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
114
115	return 0;
116}
117
118/**
119 * jpeg_v3_0_sw_fini - sw fini for JPEG block
120 *
121 * @handle: amdgpu_device pointer
122 *
123 * JPEG suspend and free up sw allocation
124 */
125static int jpeg_v3_0_sw_fini(void *handle)
126{
127	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
128	int r;
129
130	r = amdgpu_jpeg_suspend(adev);
131	if (r)
132		return r;
133
134	r = amdgpu_jpeg_sw_fini(adev);
135
136	return r;
137}
138
139/**
140 * jpeg_v3_0_hw_init - start and test JPEG block
141 *
142 * @handle: amdgpu_device pointer
143 *
144 */
145static int jpeg_v3_0_hw_init(void *handle)
146{
147	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
148	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
149	int r;
150
151	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
152		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
153
154	r = amdgpu_ring_test_helper(ring);
155	if (r)
156		return r;
157
158	DRM_INFO("JPEG decode initialized successfully.\n");
159
160	return 0;
161}
162
163/**
164 * jpeg_v3_0_hw_fini - stop the hardware block
165 *
166 * @handle: amdgpu_device pointer
167 *
168 * Stop the JPEG block, mark ring as not ready any more
169 */
170static int jpeg_v3_0_hw_fini(void *handle)
171{
172	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
173
174	cancel_delayed_work_sync(&adev->vcn.idle_work);
175
176	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
177	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
178		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
179
180	return 0;
181}
182
183/**
184 * jpeg_v3_0_suspend - suspend JPEG block
185 *
186 * @handle: amdgpu_device pointer
187 *
188 * HW fini and suspend JPEG block
189 */
190static int jpeg_v3_0_suspend(void *handle)
191{
192	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
193	int r;
194
195	r = jpeg_v3_0_hw_fini(adev);
196	if (r)
197		return r;
198
199	r = amdgpu_jpeg_suspend(adev);
200
201	return r;
202}
203
204/**
205 * jpeg_v3_0_resume - resume JPEG block
206 *
207 * @handle: amdgpu_device pointer
208 *
209 * Resume firmware and hw init JPEG block
210 */
211static int jpeg_v3_0_resume(void *handle)
212{
213	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
214	int r;
215
216	r = amdgpu_jpeg_resume(adev);
217	if (r)
218		return r;
219
220	r = jpeg_v3_0_hw_init(adev);
221
222	return r;
223}
224
225static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
226{
227	uint32_t data = 0;
228
229	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
230	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
231		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
232	else
233		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
234
235	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
236	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
237	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
238
239	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
240	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
241		| JPEG_CGC_GATE__JPEG2_DEC_MASK
242		| JPEG_CGC_GATE__JPEG_ENC_MASK
243		| JPEG_CGC_GATE__JMCIF_MASK
244		| JPEG_CGC_GATE__JRBBM_MASK);
245	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
246
247	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
248	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
249		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
250		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
251		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
252	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
253}
254
255static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
256{
257	uint32_t data = 0;
258
259	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
260	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
261		|JPEG_CGC_GATE__JPEG2_DEC_MASK
262		|JPEG_CGC_GATE__JPEG_ENC_MASK
263		|JPEG_CGC_GATE__JMCIF_MASK
264		|JPEG_CGC_GATE__JRBBM_MASK);
265	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
266}
267
268static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
269{
270	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
271		uint32_t data = 0;
272		int r = 0;
273
274		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
275		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
276
277		r = SOC15_WAIT_ON_RREG(JPEG, 0,
278			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
279			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
280
281		if (r) {
282			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
283			return r;
284		}
285	}
286
287	/* disable anti hang mechanism */
288	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
289		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
290
291	/* keep the JPEG in static PG mode */
292	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
293		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
294
295	return 0;
296}
297
298static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
299{
300	/* enable anti hang mechanism */
301	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
302		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
303		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
304
305	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
306		uint32_t data = 0;
307		int r = 0;
308
309		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
310		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
311
312		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
313			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
314			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
315
316		if (r) {
317			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
318			return r;
319		}
320	}
321
322	return 0;
323}
324
325/**
326 * jpeg_v3_0_start - start JPEG block
327 *
328 * @adev: amdgpu_device pointer
329 *
330 * Setup and start the JPEG block
331 */
332static int jpeg_v3_0_start(struct amdgpu_device *adev)
333{
334	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
335	int r;
336
337	if (adev->pm.dpm_enabled)
338		amdgpu_dpm_enable_jpeg(adev, true);
339
340	/* disable power gating */
341	r = jpeg_v3_0_disable_static_power_gating(adev);
342	if (r)
343		return r;
344
345	/* JPEG disable CGC */
346	jpeg_v3_0_disable_clock_gating(adev);
347
348	/* MJPEG global tiling registers */
349	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
350		adev->gfx.config.gb_addr_config);
351	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
352		adev->gfx.config.gb_addr_config);
353
354	/* enable JMI channel */
355	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
356		~UVD_JMI_CNTL__SOFT_RESET_MASK);
357
358	/* enable System Interrupt for JRBC */
359	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
360		JPEG_SYS_INT_EN__DJRBC_MASK,
361		~JPEG_SYS_INT_EN__DJRBC_MASK);
362
363	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
364	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
365	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
366		lower_32_bits(ring->gpu_addr));
367	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
368		upper_32_bits(ring->gpu_addr));
369	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
370	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
371	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
372	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
373	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
374
375	return 0;
376}
377
378/**
379 * jpeg_v3_0_stop - stop JPEG block
380 *
381 * @adev: amdgpu_device pointer
382 *
383 * stop the JPEG block
384 */
385static int jpeg_v3_0_stop(struct amdgpu_device *adev)
386{
387	int r;
388
389	/* reset JMI */
390	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
391		UVD_JMI_CNTL__SOFT_RESET_MASK,
392		~UVD_JMI_CNTL__SOFT_RESET_MASK);
393
394	jpeg_v3_0_enable_clock_gating(adev);
395
396	/* enable power gating */
397	r = jpeg_v3_0_enable_static_power_gating(adev);
398	if (r)
399		return r;
400
401	if (adev->pm.dpm_enabled)
402		amdgpu_dpm_enable_jpeg(adev, false);
403
404	return 0;
405}
406
407/**
408 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
409 *
410 * @ring: amdgpu_ring pointer
411 *
412 * Returns the current hardware read pointer
413 */
414static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
415{
416	struct amdgpu_device *adev = ring->adev;
417
418	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
419}
420
421/**
422 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
423 *
424 * @ring: amdgpu_ring pointer
425 *
426 * Returns the current hardware write pointer
427 */
428static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
429{
430	struct amdgpu_device *adev = ring->adev;
431
432	if (ring->use_doorbell)
433		return *ring->wptr_cpu_addr;
434	else
435		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
436}
437
438/**
439 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
440 *
441 * @ring: amdgpu_ring pointer
442 *
443 * Commits the write pointer to the hardware
444 */
445static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
446{
447	struct amdgpu_device *adev = ring->adev;
448
449	if (ring->use_doorbell) {
450		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
451		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
452	} else {
453		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
454	}
455}
456
457static bool jpeg_v3_0_is_idle(void *handle)
458{
459	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
460	int ret = 1;
461
462	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
463		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
464		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
465
466	return ret;
467}
468
469static int jpeg_v3_0_wait_for_idle(void *handle)
470{
471	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
472
473	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
474		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
475		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
476}
477
478static int jpeg_v3_0_set_clockgating_state(void *handle,
479					  enum amd_clockgating_state state)
480{
481	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
482	bool enable = state == AMD_CG_STATE_GATE;
483
484	if (enable) {
485		if (!jpeg_v3_0_is_idle(handle))
486			return -EBUSY;
487		jpeg_v3_0_enable_clock_gating(adev);
488	} else {
489		jpeg_v3_0_disable_clock_gating(adev);
490	}
491
492	return 0;
493}
494
495static int jpeg_v3_0_set_powergating_state(void *handle,
496					  enum amd_powergating_state state)
497{
498	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
499	int ret;
500
501	if(state == adev->jpeg.cur_state)
502		return 0;
503
504	if (state == AMD_PG_STATE_GATE)
505		ret = jpeg_v3_0_stop(adev);
506	else
507		ret = jpeg_v3_0_start(adev);
508
509	if(!ret)
510		adev->jpeg.cur_state = state;
511
512	return ret;
513}
514
515static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
516					struct amdgpu_irq_src *source,
517					unsigned type,
518					enum amdgpu_interrupt_state state)
519{
520	return 0;
521}
522
523static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
524				      struct amdgpu_irq_src *source,
525				      struct amdgpu_iv_entry *entry)
526{
527	DRM_DEBUG("IH: JPEG TRAP\n");
528
529	switch (entry->src_id) {
530	case VCN_2_0__SRCID__JPEG_DECODE:
531		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
532		break;
533	default:
534		DRM_ERROR("Unhandled interrupt: %d %d\n",
535			  entry->src_id, entry->src_data[0]);
536		break;
537	}
538
539	return 0;
540}
541
542static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
543	.name = "jpeg_v3_0",
544	.early_init = jpeg_v3_0_early_init,
545	.late_init = NULL,
546	.sw_init = jpeg_v3_0_sw_init,
547	.sw_fini = jpeg_v3_0_sw_fini,
548	.hw_init = jpeg_v3_0_hw_init,
549	.hw_fini = jpeg_v3_0_hw_fini,
550	.suspend = jpeg_v3_0_suspend,
551	.resume = jpeg_v3_0_resume,
552	.is_idle = jpeg_v3_0_is_idle,
553	.wait_for_idle = jpeg_v3_0_wait_for_idle,
554	.check_soft_reset = NULL,
555	.pre_soft_reset = NULL,
556	.soft_reset = NULL,
557	.post_soft_reset = NULL,
558	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
559	.set_powergating_state = jpeg_v3_0_set_powergating_state,
560};
561
562static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
563	.type = AMDGPU_RING_TYPE_VCN_JPEG,
564	.align_mask = 0xf,
565	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
566	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
567	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
568	.emit_frame_size =
569		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
570		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
571		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
572		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
573		8 + 16,
574	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
575	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
576	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
577	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
578	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
579	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
580	.insert_nop = jpeg_v2_0_dec_ring_nop,
581	.insert_start = jpeg_v2_0_dec_ring_insert_start,
582	.insert_end = jpeg_v2_0_dec_ring_insert_end,
583	.pad_ib = amdgpu_ring_generic_pad_ib,
584	.begin_use = amdgpu_jpeg_ring_begin_use,
585	.end_use = amdgpu_jpeg_ring_end_use,
586	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
587	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
588	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
589};
590
591static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
592{
593	adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
594	DRM_INFO("JPEG decode is enabled in VM mode\n");
595}
596
597static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
598	.set = jpeg_v3_0_set_interrupt_state,
599	.process = jpeg_v3_0_process_interrupt,
600};
601
602static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
603{
604	adev->jpeg.inst->irq.num_types = 1;
605	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
606}
607
608const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
609{
610	.type = AMD_IP_BLOCK_TYPE_JPEG,
611	.major = 3,
612	.minor = 0,
613	.rev = 0,
614	.funcs = &jpeg_v3_0_ip_funcs,
615};
Linux v5.14.15: drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#include "amdgpu_jpeg.h"
 26#include "amdgpu_pm.h"
 27#include "soc15.h"
 28#include "soc15d.h"
 29#include "jpeg_v2_0.h"
 30
 31#include "vcn/vcn_3_0_0_offset.h"
 32#include "vcn/vcn_3_0_0_sh_mask.h"
 33#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
 34
 35#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f
 36
 37static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 38static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
 39static int jpeg_v3_0_set_powergating_state(void *handle,
 40				enum amd_powergating_state state);
 41
 42/**
 43 * jpeg_v3_0_early_init - set function pointers
 44 *
 45 * @handle: amdgpu_device pointer
 46 *
 47 * Set ring and irq function pointers
 48 */
 49static int jpeg_v3_0_early_init(void *handle)
 50{
 51	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 52
 53	if (adev->asic_type != CHIP_YELLOW_CARP) {
 54		u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
 55
 56		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 57			return -ENOENT;
 58	}
 59
 60	adev->jpeg.num_jpeg_inst = 1;
 61
 62	jpeg_v3_0_set_dec_ring_funcs(adev);
 63	jpeg_v3_0_set_irq_funcs(adev);
 64
 65	return 0;
 66}
 67
 68/**
 69 * jpeg_v3_0_sw_init - sw init for JPEG block
 70 *
 71 * @handle: amdgpu_device pointer
 72 *
 73 * Load firmware and sw initialization
 74 */
 75static int jpeg_v3_0_sw_init(void *handle)
 76{
 77	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 78	struct amdgpu_ring *ring;
 79	int r;
 80
 81	/* JPEG TRAP */
 82	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 83		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
 84	if (r)
 85		return r;
 86
 87	r = amdgpu_jpeg_sw_init(adev);
 88	if (r)
 89		return r;
 90
 91	r = amdgpu_jpeg_resume(adev);
 92	if (r)
 93		return r;
 94
 95	ring = &adev->jpeg.inst->ring_dec;
 96	ring->use_doorbell = true;
 97	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
 98	sprintf(ring->name, "jpeg_dec");
 99	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
100			     AMDGPU_RING_PRIO_DEFAULT, NULL);
101	if (r)
102		return r;
103
104	adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
105	adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
106
107	return 0;
108}
109
110/**
111 * jpeg_v3_0_sw_fini - sw fini for JPEG block
112 *
113 * @handle: amdgpu_device pointer
114 *
115 * JPEG suspend and free up sw allocation
116 */
117static int jpeg_v3_0_sw_fini(void *handle)
118{
119	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
120	int r;
121
122	r = amdgpu_jpeg_suspend(adev);
123	if (r)
124		return r;
125
126	r = amdgpu_jpeg_sw_fini(adev);
127
128	return r;
129}
130
131/**
132 * jpeg_v3_0_hw_init - start and test JPEG block
133 *
134 * @handle: amdgpu_device pointer
135 *
136 */
137static int jpeg_v3_0_hw_init(void *handle)
138{
139	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
140	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
141	int r;
142
143	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
144		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
145
146	r = amdgpu_ring_test_helper(ring);
147	if (r)
148		return r;
149
150	DRM_INFO("JPEG decode initialized successfully.\n");
151
152	return 0;
153}
154
155/**
156 * jpeg_v3_0_hw_fini - stop the hardware block
157 *
158 * @handle: amdgpu_device pointer
159 *
160 * Stop the JPEG block, mark ring as not ready any more
161 */
162static int jpeg_v3_0_hw_fini(void *handle)
163{
164	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
165
166	cancel_delayed_work_sync(&adev->vcn.idle_work);
167
168	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
169	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
170		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
171
172	return 0;
173}
174
175/**
176 * jpeg_v3_0_suspend - suspend JPEG block
177 *
178 * @handle: amdgpu_device pointer
179 *
180 * HW fini and suspend JPEG block
181 */
182static int jpeg_v3_0_suspend(void *handle)
183{
184	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
185	int r;
186
187	r = jpeg_v3_0_hw_fini(adev);
188	if (r)
189		return r;
190
191	r = amdgpu_jpeg_suspend(adev);
192
193	return r;
194}
195
196/**
197 * jpeg_v3_0_resume - resume JPEG block
198 *
199 * @handle: amdgpu_device pointer
200 *
201 * Resume firmware and hw init JPEG block
202 */
203static int jpeg_v3_0_resume(void *handle)
204{
205	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
206	int r;
207
208	r = amdgpu_jpeg_resume(adev);
209	if (r)
210		return r;
211
212	r = jpeg_v3_0_hw_init(adev);
213
214	return r;
215}
216
217static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
218{
219	uint32_t data = 0;
220
221	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
222	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
223		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
224	else
225		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
226
227	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
228	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
229	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
230
231	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
232	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
233		| JPEG_CGC_GATE__JPEG2_DEC_MASK
234		| JPEG_CGC_GATE__JPEG_ENC_MASK
235		| JPEG_CGC_GATE__JMCIF_MASK
236		| JPEG_CGC_GATE__JRBBM_MASK);
237	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
238
239	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
240	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
241		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
242		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
243		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
244	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
245}
246
247static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
248{
249	uint32_t data = 0;
250
251	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
252	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
253		|JPEG_CGC_GATE__JPEG2_DEC_MASK
254		|JPEG_CGC_GATE__JPEG_ENC_MASK
255		|JPEG_CGC_GATE__JMCIF_MASK
256		|JPEG_CGC_GATE__JRBBM_MASK);
257	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
258}
259
260static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
261{
262	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
263		uint32_t data = 0;
264		int r = 0;
265
266		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
267		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
268
269		r = SOC15_WAIT_ON_RREG(JPEG, 0,
270			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
271			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
272
273		if (r) {
274			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
275			return r;
276		}
277	}
278
279	/* disable anti hang mechanism */
280	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
281		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
282
283	/* keep the JPEG in static PG mode */
284	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
285		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
286
287	return 0;
288}
289
290static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
291{
292	/* enable anti hang mechanism */
293	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
294		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
295		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
296
297	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
298		uint32_t data = 0;
299		int r = 0;
300
301		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
302		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
303
304		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
305			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
306			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
307
308		if (r) {
309			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
310			return r;
311		}
312	}
313
314	return 0;
315}
316
317/**
318 * jpeg_v3_0_start - start JPEG block
319 *
320 * @adev: amdgpu_device pointer
321 *
322 * Setup and start the JPEG block
323 */
324static int jpeg_v3_0_start(struct amdgpu_device *adev)
325{
326	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
327	int r;
328
329	if (adev->pm.dpm_enabled)
330		amdgpu_dpm_enable_jpeg(adev, true);
331
332	/* disable power gating */
333	r = jpeg_v3_0_disable_static_power_gating(adev);
334	if (r)
335		return r;
336
337	/* JPEG disable CGC */
338	jpeg_v3_0_disable_clock_gating(adev);
339
340	/* MJPEG global tiling registers */
341	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
342		adev->gfx.config.gb_addr_config);
343	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
344		adev->gfx.config.gb_addr_config);
345
346	/* enable JMI channel */
347	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
348		~UVD_JMI_CNTL__SOFT_RESET_MASK);
349
350	/* enable System Interrupt for JRBC */
351	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
352		JPEG_SYS_INT_EN__DJRBC_MASK,
353		~JPEG_SYS_INT_EN__DJRBC_MASK);
354
355	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
356	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
357	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
358		lower_32_bits(ring->gpu_addr));
359	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
360		upper_32_bits(ring->gpu_addr));
361	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
362	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
363	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
364	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
365	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
366
367	return 0;
368}
369
370/**
371 * jpeg_v3_0_stop - stop JPEG block
372 *
373 * @adev: amdgpu_device pointer
374 *
375 * stop the JPEG block
376 */
377static int jpeg_v3_0_stop(struct amdgpu_device *adev)
378{
379	int r;
380
381	/* reset JMI */
382	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
383		UVD_JMI_CNTL__SOFT_RESET_MASK,
384		~UVD_JMI_CNTL__SOFT_RESET_MASK);
385
386	jpeg_v3_0_enable_clock_gating(adev);
387
388	/* enable power gating */
389	r = jpeg_v3_0_enable_static_power_gating(adev);
390	if (r)
391		return r;
392
393	if (adev->pm.dpm_enabled)
394		amdgpu_dpm_enable_jpeg(adev, false);
395
396	return 0;
397}
398
399/**
400 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
401 *
402 * @ring: amdgpu_ring pointer
403 *
404 * Returns the current hardware read pointer
405 */
406static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
407{
408	struct amdgpu_device *adev = ring->adev;
409
410	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
411}
412
413/**
414 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
415 *
416 * @ring: amdgpu_ring pointer
417 *
418 * Returns the current hardware write pointer
419 */
420static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
421{
422	struct amdgpu_device *adev = ring->adev;
423
424	if (ring->use_doorbell)
425		return adev->wb.wb[ring->wptr_offs];
426	else
427		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
428}
429
430/**
431 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
432 *
433 * @ring: amdgpu_ring pointer
434 *
435 * Commits the write pointer to the hardware
436 */
437static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
438{
439	struct amdgpu_device *adev = ring->adev;
440
441	if (ring->use_doorbell) {
442		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
443		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
444	} else {
445		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
446	}
447}
448
449static bool jpeg_v3_0_is_idle(void *handle)
450{
451	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
452	int ret = 1;
453
454	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
455		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
456		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
457
458	return ret;
459}
460
461static int jpeg_v3_0_wait_for_idle(void *handle)
462{
463	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
464
465	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
466		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
467		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
468}
469
470static int jpeg_v3_0_set_clockgating_state(void *handle,
471					  enum amd_clockgating_state state)
472{
473	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
474	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
475
476	if (enable) {
477		if (!jpeg_v3_0_is_idle(handle))
478			return -EBUSY;
479		jpeg_v3_0_enable_clock_gating(adev);
480	} else {
481		jpeg_v3_0_disable_clock_gating(adev);
482	}
483
484	return 0;
485}
486
487static int jpeg_v3_0_set_powergating_state(void *handle,
488					  enum amd_powergating_state state)
489{
490	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
491	int ret;
492
493	if(state == adev->jpeg.cur_state)
494		return 0;
495
496	if (state == AMD_PG_STATE_GATE)
497		ret = jpeg_v3_0_stop(adev);
498	else
499		ret = jpeg_v3_0_start(adev);
500
501	if(!ret)
502		adev->jpeg.cur_state = state;
503
504	return ret;
505}
506
507static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
508					struct amdgpu_irq_src *source,
509					unsigned type,
510					enum amdgpu_interrupt_state state)
511{
512	return 0;
513}
514
515static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
516				      struct amdgpu_irq_src *source,
517				      struct amdgpu_iv_entry *entry)
518{
519	DRM_DEBUG("IH: JPEG TRAP\n");
520
521	switch (entry->src_id) {
522	case VCN_2_0__SRCID__JPEG_DECODE:
523		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
524		break;
525	default:
526		DRM_ERROR("Unhandled interrupt: %d %d\n",
527			  entry->src_id, entry->src_data[0]);
528		break;
529	}
530
531	return 0;
532}
533
534static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
535	.name = "jpeg_v3_0",
536	.early_init = jpeg_v3_0_early_init,
537	.late_init = NULL,
538	.sw_init = jpeg_v3_0_sw_init,
539	.sw_fini = jpeg_v3_0_sw_fini,
540	.hw_init = jpeg_v3_0_hw_init,
541	.hw_fini = jpeg_v3_0_hw_fini,
542	.suspend = jpeg_v3_0_suspend,
543	.resume = jpeg_v3_0_resume,
544	.is_idle = jpeg_v3_0_is_idle,
545	.wait_for_idle = jpeg_v3_0_wait_for_idle,
546	.check_soft_reset = NULL,
547	.pre_soft_reset = NULL,
548	.soft_reset = NULL,
549	.post_soft_reset = NULL,
550	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
551	.set_powergating_state = jpeg_v3_0_set_powergating_state,
552};
553
554static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
555	.type = AMDGPU_RING_TYPE_VCN_JPEG,
556	.align_mask = 0xf,
557	.vmhub = AMDGPU_MMHUB_0,
558	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
559	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
560	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
561	.emit_frame_size =
562		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
563		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
564		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
565		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
566		8 + 16,
567	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
568	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
569	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
570	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
571	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
572	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
573	.insert_nop = jpeg_v2_0_dec_ring_nop,
574	.insert_start = jpeg_v2_0_dec_ring_insert_start,
575	.insert_end = jpeg_v2_0_dec_ring_insert_end,
576	.pad_ib = amdgpu_ring_generic_pad_ib,
577	.begin_use = amdgpu_jpeg_ring_begin_use,
578	.end_use = amdgpu_jpeg_ring_end_use,
579	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
580	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
581	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
582};
583
584static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
585{
586	adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs;
587	DRM_INFO("JPEG decode is enabled in VM mode\n");
588}
589
590static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
591	.set = jpeg_v3_0_set_interrupt_state,
592	.process = jpeg_v3_0_process_interrupt,
593};
594
595static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
596{
597	adev->jpeg.inst->irq.num_types = 1;
598	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
599}
600
601const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
602{
603	.type = AMD_IP_BLOCK_TYPE_JPEG,
604	.major = 3,
605	.minor = 0,
606	.rev = 0,
607	.funcs = &jpeg_v3_0_ip_funcs,
608};
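For context, a minimal sketch of how the jpeg_v3_0_ip_block exported above is hooked into a device: the SoC setup code appends it to the device's IP block list, and the generic amdgpu IP lifecycle then drives the amd_ip_funcs callbacks defined in this file (early_init, sw_init, hw_init, suspend/resume, hw_fini, sw_fini). amdgpu_device_ip_block_add() is the real registration helper; the wrapper function and its name below are hypothetical, and the actual call sites live in the SoC/IP-discovery setup code, not in jpeg_v3_0.c.

#include "amdgpu.h"
#include "jpeg_v3_0.h"

/*
 * Illustrative sketch only: register the JPEG v3.0 IP block on a device.
 * amdgpu_device_ip_block_add() is the existing helper in amdgpu_device.c;
 * this wrapper function is hypothetical and stands in for the real SoC
 * setup path that decides which IP blocks a given ASIC gets.
 */
static int example_register_jpeg_v3_0(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
}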