/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

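/**
 * si_ih_enable_interrupts - enable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt controller and the IH ring buffer so that
 * incoming interrupt vectors are written to the ring.
 */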
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

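/**
 * si_ih_disable_interrupts - disable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt controller and the IH ring buffer and
 * reset the ring read and write pointers.
 */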
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

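/**
 * si_ih_irq_init - init and enable the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the IH ring buffer base, size and write-back pointer
 * location, enable bus mastering and turn the ring on.
 * Returns 0 for success.
 */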
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	si_ih_disable_interrupts(adev);
	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

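/**
 * si_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Read the write pointer from the hardware write-back copy, handle a
 * flagged ring buffer overflow by advancing the read pointer past the
 * overflow point, and return the wptr masked to the ring size.
 */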
static u32 si_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}

/**
 * si_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
{
	/* Process all interrupts */
	return true;
}

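/**
 * si_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @entry: IV entry to fill in
 *
 * Decode the 16-byte interrupt vector at the current read pointer into
 * @entry (source id, source data, ring id and vmid) and advance the
 * read pointer.
 */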
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);

	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	adev->irq.ih.rptr += 16;
}

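/**
 * si_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 *
 * Write the software read pointer back to the IH_RB_RPTR register so
 * the hardware knows which vectors have been consumed.
 */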
static void si_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
				       enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
				       enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.prescreen_iv = si_ih_prescreen_iv,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

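/**
 * si_ih_enable_interrupts - enable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt controller and the IH ring buffer so that
 * incoming interrupt vectors are written to the ring.
 */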
static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

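/**
 * si_ih_disable_interrupts - disable the IH ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt controller and the IH ring buffer and
 * reset the ring read and write pointers.
 */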
static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

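/**
 * si_ih_irq_init - init and enable the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the IH ring buffer base, size and write-back pointer
 * location, enable bus mastering and turn the ring on.
 * Returns 0 for success.
 */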
static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

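/**
 * si_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr from
 *
 * Read the write pointer from the CPU-visible write-back copy, handle a
 * flagged ring buffer overflow by advancing the read pointer past the
 * overflow point, and return the wptr masked to the ring size.
 */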
static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

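/**
 * si_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode from
 * @entry: IV entry to fill in
 *
 * Decode the 16-byte interrupt vector at the current read pointer into
 * @entry (source id, source data, ring id and vmid) and advance the
 * read pointer.
 */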
static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

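/**
 * si_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set the rptr for
 *
 * Write the software read pointer back to the IH_RB_RPTR register so
 * the hardware knows which vectors have been consumed.
 */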
static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
				       enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
				       enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};