drivers/gpu/drm/amd/amdgpu/si_ih.c (v6.9.4)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

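/**
 * si_ih_enable_interrupts - enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Set ENABLE_INTR in IH_CNTL and IH_RB_ENABLE in IH_RB_CNTL so the IH
 * block starts writing interrupt vectors into the ring buffer, then
 * mark the ring as enabled.
 */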
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

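/**
 * si_ih_disable_interrupts - disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Clear the enable bits again, reset the ring read and write pointers
 * to zero and mark the ring as disabled.
 */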
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

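/**
 * si_ih_irq_init - initialize and enable the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the dummy read address, the ring buffer base, size and
 * write-back (wptr) address, enable write pointer overflow handling,
 * enable rptr re-arm when MSIs are in use, and finally enable the
 * ring.  Returns 0 on success.
 */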
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

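/**
 * si_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the IH ring and wait a millisecond for any in-flight
 * interrupts to drain.
 */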
static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

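/**
 * si_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch the wptr from
 *
 * Read the write pointer from the write-back slot.  On overflow, warn,
 * move the read pointer past the overwritten entries, pulse the
 * WPTR_OVERFLOW_CLEAR bit, and return the wptr masked to the ring size.
 */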
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

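/**
 * si_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer the vector is read from
 * @entry: decoded IV entry to fill in
 *
 * Interrupt vectors on SI are four dwords: unpack the source id, source
 * data, ring id and vmid into @entry and advance the ring read pointer
 * by 16 bytes.
 */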
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

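/**
 * si_ih_set_rptr - write the updated read pointer back to the hardware
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer
 */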
static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

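/* Software init: allocate a 64KB IH ring buffer and set up the common
 * amdgpu interrupt handling infrastructure.
 */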
static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

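/**
 * si_ih_soft_reset - soft reset the IH block
 *
 * @handle: amdgpu_device pointer
 *
 * If SRBM_STATUS reports the IH block as busy, pulse its bit in
 * SRBM_SOFT_RESET with short delays around the reset.
 */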
static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

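/* Callback tables used by the amdgpu core to drive the SI IH IP block. */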
static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};
drivers/gpu/drm/amd/amdgpu/si_ih.c (v4.17)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

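/* In this version the wptr write-back address is computed from the
 * device write-back buffer (adev->wb) slot reserved for the IH ring.
 */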
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	si_ih_disable_interrupts(adev);
	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

static u32 si_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}

/**
 * si_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
{
	/* Process all interrupts */
	return true;
}

static void si_ih_decode_iv(struct amdgpu_device *adev,
			     struct amdgpu_iv_entry *entry)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);

	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	adev->irq.ih.rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.prescreen_iv = si_ih_prescreen_iv,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};