v6.9.4 (drivers/gpu/drm/amd/amdgpu/si_ih.c):

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
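
/**
 * si_ih_enable_interrupts - enable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Turn on the interrupt controller and the IH ring buffer so the
 * hardware can start delivering interrupt vectors (SI).
 */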
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}
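
/**
 * si_ih_disable_interrupts - disable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Turn off the interrupt controller and the IH ring buffer and reset
 * the hardware read/write pointers (SI).
 */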
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}
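
/**
 * si_ih_irq_init - program the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Program the dummy read address, the ring buffer base and size, and
 * the write-pointer writeback address, enable overflow handling, then
 * re-enable interrupt delivery.  Returns 0.
 */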
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}
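
/**
 * si_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch the wptr from
 *
 * Read the write pointer from the writeback slot.  On ring overflow,
 * warn, skip past the overwritten entries and clear the overflow bit
 * so further overflows can be detected.  Returns the masked wptr.
 */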
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}
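
/**
 * si_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer being processed
 * @entry: IV entry to fill in
 *
 * Each IV ring entry on SI is 16 bytes (four dwords) carrying the
 * source id, source data, ring id and VMID; decode one entry and
 * advance the software read pointer past it.
 */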
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}
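
/**
 * si_ih_set_rptr - write the IH ring buffer rptr back to the hardware
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer whose read pointer is written
 *
 * Tell the hardware how far the driver has processed the IH ring.
 */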
static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};

v4.10.11 (drivers/gpu/drm/amd/amdgpu/si_ih.c):

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "si/sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

static int si_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	si_ih_disable_interrupts(adev);
	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

static u32 si_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}

static void si_ih_decode_iv(struct amdgpu_device *adev,
			     struct amdgpu_iv_entry *entry)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);

	entry->src_id = dw[0] & 0xff;
	entry->src_data = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vm_id = (dw[2] >> 8) & 0xff;

	adev->irq.ih.rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};