jpeg_v3_0.c (Linux v5.9)
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#include "amdgpu_jpeg.h"
 26#include "amdgpu_pm.h"
 27#include "soc15.h"
 28#include "soc15d.h"
 29#include "jpeg_v2_0.h"
 30
 31#include "vcn/vcn_3_0_0_offset.h"
 32#include "vcn/vcn_3_0_0_sh_mask.h"
 33#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
 34
 35#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f
 36
 37static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 38static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
 39static int jpeg_v3_0_set_powergating_state(void *handle,
 40				enum amd_powergating_state state);
 41
 42/**
 43 * jpeg_v3_0_early_init - set function pointers
 44 *
 45 * @handle: amdgpu_device pointer
 46 *
 47 * Set ring and irq function pointers
 48 */
 49static int jpeg_v3_0_early_init(void *handle)
 50{
 51	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 52	u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
 53
 54	if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 55		return -ENOENT;
 56
 57	adev->jpeg.num_jpeg_inst = 1;
 58
 59	jpeg_v3_0_set_dec_ring_funcs(adev);
 60	jpeg_v3_0_set_irq_funcs(adev);
 61
 62	return 0;
 63}
 64
 65/**
 66 * jpeg_v3_0_sw_init - sw init for JPEG block
 67 *
 68 * @handle: amdgpu_device pointer
 69 *
 70 * Load firmware and sw initialization
 71 */
 72static int jpeg_v3_0_sw_init(void *handle)
 73{
 74	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 75	struct amdgpu_ring *ring;
 76	int r;
 77
 78	/* JPEG TRAP */
 79	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 80		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
 81	if (r)
 82		return r;
 83
 84	r = amdgpu_jpeg_sw_init(adev);
 85	if (r)
 86		return r;
 87
 88	r = amdgpu_jpeg_resume(adev);
 89	if (r)
 90		return r;
 91
 92	ring = &adev->jpeg.inst->ring_dec;
 93	ring->use_doorbell = true;
 94	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
 95	sprintf(ring->name, "jpeg_dec");
 96	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
 97			     AMDGPU_RING_PRIO_DEFAULT);
 98	if (r)
 99		return r;
100
101	adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
102	adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
103
104	return 0;
105}
106
107/**
108 * jpeg_v3_0_sw_fini - sw fini for JPEG block
109 *
110 * @handle: amdgpu_device pointer
111 *
112 * JPEG suspend and free up sw allocation
113 */
114static int jpeg_v3_0_sw_fini(void *handle)
115{
116	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
117	int r;
118
119	r = amdgpu_jpeg_suspend(adev);
120	if (r)
121		return r;
122
123	r = amdgpu_jpeg_sw_fini(adev);
124
125	return r;
126}
127
128/**
129 * jpeg_v3_0_hw_init - start and test JPEG block
130 *
131 * @handle: amdgpu_device pointer
132 *
133 */
134static int jpeg_v3_0_hw_init(void *handle)
135{
136	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
137	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
138	int r;
139
140	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
141		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
142
143	r = amdgpu_ring_test_helper(ring);
144	if (r)
145		return r;
146
147	DRM_INFO("JPEG decode initialized successfully.\n");
148
149	return 0;
150}
151
152/**
153 * jpeg_v3_0_hw_fini - stop the hardware block
154 *
155 * @handle: amdgpu_device pointer
156 *
157 * Stop the JPEG block, mark ring as not ready any more
158 */
159static int jpeg_v3_0_hw_fini(void *handle)
160{
161	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
162	struct amdgpu_ring *ring;
163
164	ring = &adev->jpeg.inst->ring_dec;
165	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
166	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
167		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
168
169	ring->sched.ready = false;
170
171	return 0;
172}
173
174/**
175 * jpeg_v3_0_suspend - suspend JPEG block
176 *
177 * @handle: amdgpu_device pointer
178 *
179 * HW fini and suspend JPEG block
180 */
181static int jpeg_v3_0_suspend(void *handle)
182{
183	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
184	int r;
185
186	r = jpeg_v3_0_hw_fini(adev);
187	if (r)
188		return r;
189
190	r = amdgpu_jpeg_suspend(adev);
191
192	return r;
193}
194
195/**
196 * jpeg_v3_0_resume - resume JPEG block
197 *
198 * @handle: amdgpu_device pointer
199 *
200 * Resume firmware and hw init JPEG block
201 */
202static int jpeg_v3_0_resume(void *handle)
203{
204	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
205	int r;
206
207	r = amdgpu_jpeg_resume(adev);
208	if (r)
209		return r;
210
211	r = jpeg_v3_0_hw_init(adev);
212
213	return r;
214}
215
216static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device* adev)
217{
218	uint32_t data = 0;
219
220	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
221	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
222		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
223	else
224		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
225
226	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
227	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
228	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
229
230	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
231	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
232		| JPEG_CGC_GATE__JPEG2_DEC_MASK
233		| JPEG_CGC_GATE__JPEG_ENC_MASK
234		| JPEG_CGC_GATE__JMCIF_MASK
235		| JPEG_CGC_GATE__JRBBM_MASK);
236	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
237
238	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
239	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
240		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
241		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
242		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
243	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
244}
245
246static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device* adev)
247{
248	uint32_t data = 0;
249
250	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
251	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
252		|JPEG_CGC_GATE__JPEG2_DEC_MASK
253		|JPEG_CGC_GATE__JPEG_ENC_MASK
254		|JPEG_CGC_GATE__JMCIF_MASK
255		|JPEG_CGC_GATE__JRBBM_MASK);
256	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
257}
258
259static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
260{
261	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
262		uint32_t data = 0;
263		int r = 0;
264
265		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
266		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
267
268		r = SOC15_WAIT_ON_RREG(JPEG, 0,
269			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
270			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
271
272		if (r) {
273			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
274			return r;
275		}
276	}
277
278	/* disable anti hang mechanism */
279	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
280		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
281
282	/* keep the JPEG in static PG mode */
283	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
284		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
285
286	return 0;
287}
288
289static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device* adev)
290{
291	/* enable anti hang mechanism */
292	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
293		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
294		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
295
296	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
297		uint32_t data = 0;
298		int r = 0;
299
300		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
301		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
302
303		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
304			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
305			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
306
307		if (r) {
308			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
309			return r;
310		}
311	}
312
313	return 0;
314}
315
316/**
317 * jpeg_v3_0_start - start JPEG block
318 *
319 * @adev: amdgpu_device pointer
320 *
321 * Setup and start the JPEG block
322 */
323static int jpeg_v3_0_start(struct amdgpu_device *adev)
324{
325	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
326	int r;
327
328	if (adev->pm.dpm_enabled)
329		amdgpu_dpm_enable_jpeg(adev, true);
330
331	/* disable power gating */
332	r = jpeg_v3_0_disable_static_power_gating(adev);
333	if (r)
334		return r;
335
336	/* JPEG disable CGC */
337	jpeg_v3_0_disable_clock_gating(adev);
338
339	/* MJPEG global tiling registers */
340	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
341		adev->gfx.config.gb_addr_config);
342	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
343		adev->gfx.config.gb_addr_config);
344
345	/* enable JMI channel */
346	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
347		~UVD_JMI_CNTL__SOFT_RESET_MASK);
348
349	/* enable System Interrupt for JRBC */
350	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
351		JPEG_SYS_INT_EN__DJRBC_MASK,
352		~JPEG_SYS_INT_EN__DJRBC_MASK);
353
354	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
355	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
356	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
357		lower_32_bits(ring->gpu_addr));
358	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
359		upper_32_bits(ring->gpu_addr));
360	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
361	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
362	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
363	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
364	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
365
366	return 0;
367}
368
369/**
370 * jpeg_v3_0_stop - stop JPEG block
371 *
372 * @adev: amdgpu_device pointer
373 *
374 * stop the JPEG block
375 */
376static int jpeg_v3_0_stop(struct amdgpu_device *adev)
377{
378	int r;
379
380	/* reset JMI */
381	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
382		UVD_JMI_CNTL__SOFT_RESET_MASK,
383		~UVD_JMI_CNTL__SOFT_RESET_MASK);
384
385	jpeg_v3_0_enable_clock_gating(adev);
386
387	/* enable power gating */
388	r = jpeg_v3_0_enable_static_power_gating(adev);
389	if (r)
390		return r;
391
392	if (adev->pm.dpm_enabled)
393		amdgpu_dpm_enable_jpeg(adev, false);
394
395	return 0;
396}
397
398/**
399 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
400 *
401 * @ring: amdgpu_ring pointer
402 *
403 * Returns the current hardware read pointer
404 */
405static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
406{
407	struct amdgpu_device *adev = ring->adev;
408
409	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
410}
411
412/**
413 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
414 *
415 * @ring: amdgpu_ring pointer
416 *
417 * Returns the current hardware write pointer
418 */
419static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
420{
421	struct amdgpu_device *adev = ring->adev;
422
423	if (ring->use_doorbell)
424		return adev->wb.wb[ring->wptr_offs];
425	else
426		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
427}
428
429/**
430 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
431 *
432 * @ring: amdgpu_ring pointer
433 *
434 * Commits the write pointer to the hardware
435 */
436static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
437{
438	struct amdgpu_device *adev = ring->adev;
439
440	if (ring->use_doorbell) {
441		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
442		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
443	} else {
444		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
445	}
446}
447
448static bool jpeg_v3_0_is_idle(void *handle)
449{
450	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
451	int ret = 1;
452
453	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
454		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
455		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
456
457	return ret;
458}
459
460static int jpeg_v3_0_wait_for_idle(void *handle)
461{
462	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
463	int ret;
464
465	ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
466		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
467		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
468	if (ret)
469		return ret;
470
471	return ret;
472}
473
474static int jpeg_v3_0_set_clockgating_state(void *handle,
475					  enum amd_clockgating_state state)
476{
477	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
478	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
479
480	if (enable) {
481		if (!jpeg_v3_0_is_idle(handle))
482			return -EBUSY;
483		jpeg_v3_0_enable_clock_gating(adev);
484	} else {
485		jpeg_v3_0_disable_clock_gating(adev);
486	}
487
488	return 0;
489}
490
491static int jpeg_v3_0_set_powergating_state(void *handle,
492					  enum amd_powergating_state state)
493{
494	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
495	int ret;
496
497	if(state == adev->jpeg.cur_state)
498		return 0;
499
500	if (state == AMD_PG_STATE_GATE)
501		ret = jpeg_v3_0_stop(adev);
502	else
503		ret = jpeg_v3_0_start(adev);
504
505	if(!ret)
506		adev->jpeg.cur_state = state;
507
508	return ret;
509}
510
511static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
512					struct amdgpu_irq_src *source,
513					unsigned type,
514					enum amdgpu_interrupt_state state)
515{
516	return 0;
517}
518
519static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
520				      struct amdgpu_irq_src *source,
521				      struct amdgpu_iv_entry *entry)
522{
523	DRM_DEBUG("IH: JPEG TRAP\n");
524
525	switch (entry->src_id) {
526	case VCN_2_0__SRCID__JPEG_DECODE:
527		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
528		break;
529	default:
530		DRM_ERROR("Unhandled interrupt: %d %d\n",
531			  entry->src_id, entry->src_data[0]);
532		break;
533	}
534
535	return 0;
536}
537
538static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
539	.name = "jpeg_v3_0",
540	.early_init = jpeg_v3_0_early_init,
541	.late_init = NULL,
542	.sw_init = jpeg_v3_0_sw_init,
543	.sw_fini = jpeg_v3_0_sw_fini,
544	.hw_init = jpeg_v3_0_hw_init,
545	.hw_fini = jpeg_v3_0_hw_fini,
546	.suspend = jpeg_v3_0_suspend,
547	.resume = jpeg_v3_0_resume,
548	.is_idle = jpeg_v3_0_is_idle,
549	.wait_for_idle = jpeg_v3_0_wait_for_idle,
550	.check_soft_reset = NULL,
551	.pre_soft_reset = NULL,
552	.soft_reset = NULL,
553	.post_soft_reset = NULL,
554	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
555	.set_powergating_state = jpeg_v3_0_set_powergating_state,
556};
557
558static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
559	.type = AMDGPU_RING_TYPE_VCN_JPEG,
560	.align_mask = 0xf,
561	.vmhub = AMDGPU_MMHUB_0,
562	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
563	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
564	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
565	.emit_frame_size =
566		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
567		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
568		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
569		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
570		8 + 16,
571	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
572	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
573	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
574	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
575	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
576	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
577	.insert_nop = jpeg_v2_0_dec_ring_nop,
578	.insert_start = jpeg_v2_0_dec_ring_insert_start,
579	.insert_end = jpeg_v2_0_dec_ring_insert_end,
580	.pad_ib = amdgpu_ring_generic_pad_ib,
581	.begin_use = amdgpu_jpeg_ring_begin_use,
582	.end_use = amdgpu_jpeg_ring_end_use,
583	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
584	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
585	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
586};
587
588static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
589{
590	adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs;
591	DRM_INFO("JPEG decode is enabled in VM mode\n");
592}
593
594static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
595	.set = jpeg_v3_0_set_interrupt_state,
596	.process = jpeg_v3_0_process_interrupt,
597};
598
599static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
600{
601	adev->jpeg.inst->irq.num_types = 1;
602	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
603}
604
605const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
606{
607	.type = AMD_IP_BLOCK_TYPE_JPEG,
608	.major = 3,
609	.minor = 0,
610	.rev = 0,
611	.funcs = &jpeg_v3_0_ip_funcs,
612};
jpeg_v3_0.c (Linux v6.2)
  1/*
  2 * Copyright 2019 Advanced Micro Devices, Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 *
 22 */
 23
 24#include "amdgpu.h"
 25#include "amdgpu_jpeg.h"
 26#include "amdgpu_pm.h"
 27#include "soc15.h"
 28#include "soc15d.h"
 29#include "jpeg_v2_0.h"
 30
 31#include "vcn/vcn_3_0_0_offset.h"
 32#include "vcn/vcn_3_0_0_sh_mask.h"
 33#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
 34
 35#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f
 36
 37static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 38static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
 39static int jpeg_v3_0_set_powergating_state(void *handle,
 40				enum amd_powergating_state state);
 41
 42/**
 43 * jpeg_v3_0_early_init - set function pointers
 44 *
 45 * @handle: amdgpu_device pointer
 46 *
 47 * Set ring and irq function pointers
 48 */
 49static int jpeg_v3_0_early_init(void *handle)
 50{
 51	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 52
 53	u32 harvest;
 54
 55	switch (adev->ip_versions[UVD_HWIP][0]) {
 56	case IP_VERSION(3, 1, 1):
 57		break;
 58	default:
 59		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
 60		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 61			return -ENOENT;
 62		break;
 63	}
 64
 65	adev->jpeg.num_jpeg_inst = 1;
 66
 67	jpeg_v3_0_set_dec_ring_funcs(adev);
 68	jpeg_v3_0_set_irq_funcs(adev);
 69
 70	return 0;
 71}
 72
 73/**
 74 * jpeg_v3_0_sw_init - sw init for JPEG block
 75 *
 76 * @handle: amdgpu_device pointer
 77 *
 78 * Load firmware and sw initialization
 79 */
 80static int jpeg_v3_0_sw_init(void *handle)
 81{
 82	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 83	struct amdgpu_ring *ring;
 84	int r;
 85
 86	/* JPEG TRAP */
 87	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 88		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
 89	if (r)
 90		return r;
 91
 92	r = amdgpu_jpeg_sw_init(adev);
 93	if (r)
 94		return r;
 95
 96	r = amdgpu_jpeg_resume(adev);
 97	if (r)
 98		return r;
 99
100	ring = &adev->jpeg.inst->ring_dec;
101	ring->use_doorbell = true;
102	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
103	sprintf(ring->name, "jpeg_dec");
104	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
105			     AMDGPU_RING_PRIO_DEFAULT, NULL);
106	if (r)
107		return r;
108
109	adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
110	adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
111
112	return 0;
113}
114
115/**
116 * jpeg_v3_0_sw_fini - sw fini for JPEG block
117 *
118 * @handle: amdgpu_device pointer
119 *
120 * JPEG suspend and free up sw allocation
121 */
122static int jpeg_v3_0_sw_fini(void *handle)
123{
124	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
125	int r;
126
127	r = amdgpu_jpeg_suspend(adev);
128	if (r)
129		return r;
130
131	r = amdgpu_jpeg_sw_fini(adev);
132
133	return r;
134}
135
136/**
137 * jpeg_v3_0_hw_init - start and test JPEG block
138 *
139 * @handle: amdgpu_device pointer
140 *
141 */
142static int jpeg_v3_0_hw_init(void *handle)
143{
144	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
145	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
146	int r;
147
148	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
149		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
150
151	r = amdgpu_ring_test_helper(ring);
152	if (r)
153		return r;
154
155	DRM_INFO("JPEG decode initialized successfully.\n");
156
157	return 0;
158}
159
160/**
161 * jpeg_v3_0_hw_fini - stop the hardware block
162 *
163 * @handle: amdgpu_device pointer
164 *
165 * Stop the JPEG block, mark ring as not ready any more
166 */
167static int jpeg_v3_0_hw_fini(void *handle)
168{
169	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
170
171	cancel_delayed_work_sync(&adev->vcn.idle_work);
172
173	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
174	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
175		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
176
177	return 0;
178}
179
180/**
181 * jpeg_v3_0_suspend - suspend JPEG block
182 *
183 * @handle: amdgpu_device pointer
184 *
185 * HW fini and suspend JPEG block
186 */
187static int jpeg_v3_0_suspend(void *handle)
188{
189	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
190	int r;
191
192	r = jpeg_v3_0_hw_fini(adev);
193	if (r)
194		return r;
195
196	r = amdgpu_jpeg_suspend(adev);
197
198	return r;
199}
200
201/**
202 * jpeg_v3_0_resume - resume JPEG block
203 *
204 * @handle: amdgpu_device pointer
205 *
206 * Resume firmware and hw init JPEG block
207 */
208static int jpeg_v3_0_resume(void *handle)
209{
210	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
211	int r;
212
213	r = amdgpu_jpeg_resume(adev);
214	if (r)
215		return r;
216
217	r = jpeg_v3_0_hw_init(adev);
218
219	return r;
220}
221
222static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
223{
224	uint32_t data = 0;
225
226	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
227	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
228		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
229	else
230		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
231
232	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
233	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
234	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
235
236	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
237	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
238		| JPEG_CGC_GATE__JPEG2_DEC_MASK
239		| JPEG_CGC_GATE__JPEG_ENC_MASK
240		| JPEG_CGC_GATE__JMCIF_MASK
241		| JPEG_CGC_GATE__JRBBM_MASK);
242	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
243
244	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
245	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
246		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
247		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
248		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
249	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
250}
251
252static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
253{
254	uint32_t data = 0;
255
256	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
257	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
258		|JPEG_CGC_GATE__JPEG2_DEC_MASK
259		|JPEG_CGC_GATE__JPEG_ENC_MASK
260		|JPEG_CGC_GATE__JMCIF_MASK
261		|JPEG_CGC_GATE__JRBBM_MASK);
262	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
263}
264
265static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
266{
267	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
268		uint32_t data = 0;
269		int r = 0;
270
271		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
272		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
273
274		r = SOC15_WAIT_ON_RREG(JPEG, 0,
275			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
276			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
277
278		if (r) {
279			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
280			return r;
281		}
282	}
283
284	/* disable anti hang mechanism */
285	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
286		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
287
288	/* keep the JPEG in static PG mode */
289	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
290		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
291
292	return 0;
293}
294
295static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
296{
297	/* enable anti hang mechanism */
298	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
299		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
300		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
301
302	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
303		uint32_t data = 0;
304		int r = 0;
305
306		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
307		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
308
309		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
310			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
311			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
312
313		if (r) {
314			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
315			return r;
316		}
317	}
318
319	return 0;
320}
321
322/**
323 * jpeg_v3_0_start - start JPEG block
324 *
325 * @adev: amdgpu_device pointer
326 *
327 * Setup and start the JPEG block
328 */
329static int jpeg_v3_0_start(struct amdgpu_device *adev)
330{
331	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
332	int r;
333
334	if (adev->pm.dpm_enabled)
335		amdgpu_dpm_enable_jpeg(adev, true);
336
337	/* disable power gating */
338	r = jpeg_v3_0_disable_static_power_gating(adev);
339	if (r)
340		return r;
341
342	/* JPEG disable CGC */
343	jpeg_v3_0_disable_clock_gating(adev);
344
345	/* MJPEG global tiling registers */
346	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
347		adev->gfx.config.gb_addr_config);
348	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
349		adev->gfx.config.gb_addr_config);
350
351	/* enable JMI channel */
352	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
353		~UVD_JMI_CNTL__SOFT_RESET_MASK);
354
355	/* enable System Interrupt for JRBC */
356	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
357		JPEG_SYS_INT_EN__DJRBC_MASK,
358		~JPEG_SYS_INT_EN__DJRBC_MASK);
359
360	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
361	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
362	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
363		lower_32_bits(ring->gpu_addr));
364	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
365		upper_32_bits(ring->gpu_addr));
366	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
367	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
368	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
369	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
370	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
371
372	return 0;
373}
374
375/**
376 * jpeg_v3_0_stop - stop JPEG block
377 *
378 * @adev: amdgpu_device pointer
379 *
380 * stop the JPEG block
381 */
382static int jpeg_v3_0_stop(struct amdgpu_device *adev)
383{
384	int r;
385
386	/* reset JMI */
387	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
388		UVD_JMI_CNTL__SOFT_RESET_MASK,
389		~UVD_JMI_CNTL__SOFT_RESET_MASK);
390
391	jpeg_v3_0_enable_clock_gating(adev);
392
393	/* enable power gating */
394	r = jpeg_v3_0_enable_static_power_gating(adev);
395	if (r)
396		return r;
397
398	if (adev->pm.dpm_enabled)
399		amdgpu_dpm_enable_jpeg(adev, false);
400
401	return 0;
402}
403
404/**
405 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
406 *
407 * @ring: amdgpu_ring pointer
408 *
409 * Returns the current hardware read pointer
410 */
411static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
412{
413	struct amdgpu_device *adev = ring->adev;
414
415	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
416}
417
418/**
419 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
420 *
421 * @ring: amdgpu_ring pointer
422 *
423 * Returns the current hardware write pointer
424 */
425static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
426{
427	struct amdgpu_device *adev = ring->adev;
428
429	if (ring->use_doorbell)
430		return *ring->wptr_cpu_addr;
431	else
432		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
433}
434
435/**
436 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
437 *
438 * @ring: amdgpu_ring pointer
439 *
440 * Commits the write pointer to the hardware
441 */
442static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
443{
444	struct amdgpu_device *adev = ring->adev;
445
446	if (ring->use_doorbell) {
447		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
448		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
449	} else {
450		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
451	}
452}
453
454static bool jpeg_v3_0_is_idle(void *handle)
455{
456	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
457	int ret = 1;
458
459	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
460		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
461		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
462
463	return ret;
464}
465
466static int jpeg_v3_0_wait_for_idle(void *handle)
467{
468	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
469
470	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
471		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
472		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
473}
474
475static int jpeg_v3_0_set_clockgating_state(void *handle,
476					  enum amd_clockgating_state state)
477{
478	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
479	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
480
481	if (enable) {
482		if (!jpeg_v3_0_is_idle(handle))
483			return -EBUSY;
484		jpeg_v3_0_enable_clock_gating(adev);
485	} else {
486		jpeg_v3_0_disable_clock_gating(adev);
487	}
488
489	return 0;
490}
491
492static int jpeg_v3_0_set_powergating_state(void *handle,
493					  enum amd_powergating_state state)
494{
495	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
496	int ret;
497
498	if(state == adev->jpeg.cur_state)
499		return 0;
500
501	if (state == AMD_PG_STATE_GATE)
502		ret = jpeg_v3_0_stop(adev);
503	else
504		ret = jpeg_v3_0_start(adev);
505
506	if(!ret)
507		adev->jpeg.cur_state = state;
508
509	return ret;
510}
511
512static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
513					struct amdgpu_irq_src *source,
514					unsigned type,
515					enum amdgpu_interrupt_state state)
516{
517	return 0;
518}
519
520static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
521				      struct amdgpu_irq_src *source,
522				      struct amdgpu_iv_entry *entry)
523{
524	DRM_DEBUG("IH: JPEG TRAP\n");
525
526	switch (entry->src_id) {
527	case VCN_2_0__SRCID__JPEG_DECODE:
528		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
529		break;
530	default:
531		DRM_ERROR("Unhandled interrupt: %d %d\n",
532			  entry->src_id, entry->src_data[0]);
533		break;
534	}
535
536	return 0;
537}
538
539static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
540	.name = "jpeg_v3_0",
541	.early_init = jpeg_v3_0_early_init,
542	.late_init = NULL,
543	.sw_init = jpeg_v3_0_sw_init,
544	.sw_fini = jpeg_v3_0_sw_fini,
545	.hw_init = jpeg_v3_0_hw_init,
546	.hw_fini = jpeg_v3_0_hw_fini,
547	.suspend = jpeg_v3_0_suspend,
548	.resume = jpeg_v3_0_resume,
549	.is_idle = jpeg_v3_0_is_idle,
550	.wait_for_idle = jpeg_v3_0_wait_for_idle,
551	.check_soft_reset = NULL,
552	.pre_soft_reset = NULL,
553	.soft_reset = NULL,
554	.post_soft_reset = NULL,
555	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
556	.set_powergating_state = jpeg_v3_0_set_powergating_state,
557};
558
559static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
560	.type = AMDGPU_RING_TYPE_VCN_JPEG,
561	.align_mask = 0xf,
562	.vmhub = AMDGPU_MMHUB_0,
563	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
564	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
565	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
566	.emit_frame_size =
567		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
568		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
569		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
570		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
571		8 + 16,
572	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
573	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
574	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
575	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
576	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
577	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
578	.insert_nop = jpeg_v2_0_dec_ring_nop,
579	.insert_start = jpeg_v2_0_dec_ring_insert_start,
580	.insert_end = jpeg_v2_0_dec_ring_insert_end,
581	.pad_ib = amdgpu_ring_generic_pad_ib,
582	.begin_use = amdgpu_jpeg_ring_begin_use,
583	.end_use = amdgpu_jpeg_ring_end_use,
584	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
585	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
586	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
587};
588
589static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
590{
591	adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs;
592	DRM_INFO("JPEG decode is enabled in VM mode\n");
593}
594
595static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
596	.set = jpeg_v3_0_set_interrupt_state,
597	.process = jpeg_v3_0_process_interrupt,
598};
599
600static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
601{
602	adev->jpeg.inst->irq.num_types = 1;
603	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
604}
605
606const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
607{
608	.type = AMD_IP_BLOCK_TYPE_JPEG,
609	.major = 3,
610	.minor = 0,
611	.rev = 0,
612	.funcs = &jpeg_v3_0_ip_funcs,
613};
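
For context on how the jpeg_v3_0_ip_block structure above gets used: the SoC-level init code adds it to the device's IP block list with amdgpu_device_ip_block_add(), and the amd_ip_funcs callbacks are then driven through the normal early_init/sw_init/hw_init/suspend/resume flow. A minimal sketch follows; the real call sites and ordering live in the SoC file (e.g. nv.c for the chips that carry JPEG 3.0), and the helper name here is made up for illustration only. Only amdgpu_device_ip_block_add() and jpeg_v3_0_ip_block are real symbols.

#include "amdgpu.h"
#include "jpeg_v3_0.h"

/* Hypothetical helper, not upstream code: register the JPEG 3.0 IP block
 * on a supported SoC. Upstream performs this call inline in the SoC init
 * path rather than through a wrapper like this.
 */
static int example_add_jpeg_v3_0_block(struct amdgpu_device *adev)
{
	/* Appends the block; its funcs are invoked later by the IP framework. */
	return amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
}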