v6.9.4
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

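/*
 * si_ih_enable_interrupts - turn the interrupt handler (IH) ring on
 *
 * Sets ENABLE_INTR in IH_CNTL and IH_RB_ENABLE in IH_RB_CNTL so the
 * controller can deliver interrupt vectors to the ring buffer, then
 * records the state in adev->irq.ih.enabled.  si_ih_disable_interrupts
 * below does the reverse and also resets both ring pointers.
 */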
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

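/*
 * si_ih_irq_init - program and start the IH ring
 *
 * Programs the dummy read address and MC access credits, points the
 * hardware at the ring buffer allocated in sw_init, enables write-pointer
 * writeback and overflow reporting, zeroes both ring pointers, sets
 * RPTR_REARM when MSIs are enabled, then enables PCI bus mastering and
 * turns interrupt delivery back on.
 */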
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

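/*
 * si_ih_get_wptr - fetch the current IH ring write pointer
 *
 * Reads the write pointer from the writeback slot in memory.  If the
 * overflow bit is set, warn, restart processing from the last vector
 * that was not overwritten (wptr + 16), and pulse WPTR_OVERFLOW_CLEAR
 * in IH_RB_CNTL so the next overflow can be detected again.
 */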
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

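/*
 * si_ih_decode_iv - unpack one 16-byte interrupt vector
 *
 * Copies the four dwords at the current read pointer out of the ring,
 * tags the entry with the legacy client id, fills in the source id,
 * source data, ring id and VMID, and advances rptr to the next vector.
 */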
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

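/*
 * si_ih_sw_init - allocate the IH ring and set up IRQ state
 *
 * Allocates a 64 KiB interrupt ring (amdgpu_ih_ring_init with
 * use_bus_addr == false) and then initialises the common amdgpu IRQ
 * handling.
 */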
static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

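/*
 * si_ih_soft_reset - reset the IH block if it is still busy
 *
 * When SRBM_STATUS reports the IH as busy, pulses SOFT_RESET_IH in
 * SRBM_SOFT_RESET with short delays around the assert and de-assert;
 * otherwise does nothing.
 */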
static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

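/*
 * si_ih_ip_block - IP block descriptor (IH v1.0) exported for
 * registration with the amdgpu IP framework.
 */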
const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};

v5.9
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

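/*
 * si_ih_get_wptr - fetch the current IH ring write pointer
 *
 * Reads the write pointer from the writeback slot in memory.  On
 * overflow, warn, restart processing from the last vector that was not
 * overwritten (wptr + 16) and set WPTR_OVERFLOW_CLEAR in IH_RB_CNTL.
 */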
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

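/*
 * si_ih_sw_fini - software teardown, reversing sw_init
 *
 * Tears down the common amdgpu IRQ state and then frees the IH ring
 * buffer.
 */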
static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};