/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2016, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")

#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
        struct acpi_gpe_block_info *gpe_block;
        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
        acpi_status status = AE_OK;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

        /* Walk the interrupt level descriptor list */

        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
        while (gpe_xrupt_info) {

                /* Walk all Gpe Blocks attached to this interrupt level */

                gpe_block = gpe_xrupt_info->gpe_block_list_head;
                while (gpe_block) {

                        /* One callback per GPE block */

                        status =
                            gpe_walk_callback(gpe_xrupt_info, gpe_block,
                                              context);
                        if (ACPI_FAILURE(status)) {
                                if (status == AE_CTRL_END) {    /* Callback abort */
                                        status = AE_OK;
                                }
                                goto unlock_and_exit;
                        }

                        gpe_block = gpe_block->next;
                }

                gpe_xrupt_info = gpe_xrupt_info->next;
        }

unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
}
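
/*
 * Illustrative sketch (not part of this module): a minimal GPE-block walk
 * callback and how it could be driven through acpi_ev_walk_gpe_list(). The
 * callback name and the counter are hypothetical; the early-exit protocol
 * (a callback returns AE_CTRL_END to stop the walk, which the walker then
 * converts back to AE_OK) is the one implemented above.
 *
 *     static acpi_status
 *     ev_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *                         struct acpi_gpe_block_info *gpe_block, void *context)
 *     {
 *             u32 *block_count = context;
 *
 *             (*block_count)++;
 *             return (AE_OK);    // AE_CTRL_END here would terminate the walk
 *     }
 *
 *     u32 count = 0;
 *     (void)acpi_ev_walk_gpe_list(ev_count_gpe_blocks, &count);
 */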

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 *              block device. Returns a NULL gpe_device if the GPE is one of
 *              the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                       struct acpi_gpe_block_info *gpe_block, void *context)
{
        struct acpi_gpe_device_info *info = context;

        /* Increment Index by the number of GPEs in this block */

        info->next_block_base_index += gpe_block->gpe_count;

        if (info->index < info->next_block_base_index) {
                /*
                 * The GPE index is within this block, get the node. Leave the node
                 * NULL for the FADT-defined GPEs
                 */
                if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
                        info->gpe_device = gpe_block->node;
                }

                info->status = AE_OK;
                return (AE_CTRL_END);
        }

        return (AE_OK);
}
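
/*
 * Illustrative sketch (not part of this module): roughly how a caller can
 * resolve a GPE index to its owning block device with this callback. This
 * mirrors the way the public acpi_get_gpe_device() interface is expected to
 * drive it, but the snippet is a simplified sketch, not that implementation.
 *
 *     struct acpi_gpe_device_info info;
 *
 *     info.index = index;                  // 0 .. acpi_current_gpe_count - 1
 *     info.status = AE_NOT_EXIST;
 *     info.gpe_device = NULL;
 *     info.next_block_base_index = 0;
 *
 *     status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 *     // After a successful walk, info.gpe_device is the GPE block device
 *     // node, or NULL for the FADT-defined GPE blocks.
 */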

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number        - Interrupt for a GPE block
 *              gpe_xrupt_block         - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
                            struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
        struct acpi_gpe_xrupt_info *next_gpe_xrupt;
        struct acpi_gpe_xrupt_info *gpe_xrupt;
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

        /* No need for lock since we are not changing any list elements here */

        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
        while (next_gpe_xrupt) {
                if (next_gpe_xrupt->interrupt_number == interrupt_number) {
                        *gpe_xrupt_block = next_gpe_xrupt;
                        return_ACPI_STATUS(AE_OK);
                }

                next_gpe_xrupt = next_gpe_xrupt->next;
        }

        /* Not found, must allocate a new xrupt descriptor */

        gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
        if (!gpe_xrupt) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        gpe_xrupt->interrupt_number = interrupt_number;

        /* Install new interrupt descriptor with spin lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (acpi_gbl_gpe_xrupt_list_head) {
                next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
                while (next_gpe_xrupt->next) {
                        next_gpe_xrupt = next_gpe_xrupt->next;
                }

                next_gpe_xrupt->next = gpe_xrupt;
                gpe_xrupt->previous = next_gpe_xrupt;
        } else {
                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
        }

        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Install new interrupt handler if not SCI_INT */

        if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
                status = acpi_os_install_interrupt_handler(interrupt_number,
                                                           acpi_ev_gpe_xrupt_handler,
                                                           gpe_xrupt);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Could not install GPE interrupt handler at level 0x%X",
                                        interrupt_number));
                        return_ACPI_STATUS(status);
                }
        }

        *gpe_xrupt_block = gpe_xrupt;
        return_ACPI_STATUS(AE_OK);
}
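
/*
 * Illustrative sketch (not part of this module): typical use when a GPE
 * block is being installed. The caller obtains (or creates) the descriptor
 * for the block's interrupt level, then links the block onto it under
 * acpi_gbl_gpe_lock. Variable names here are hypothetical.
 *
 *     struct acpi_gpe_xrupt_info *gpe_xrupt_block;
 *
 *     status = acpi_ev_get_gpe_xrupt_block(interrupt_number,
 *                                          &gpe_xrupt_block);
 *     if (ACPI_FAILURE(status)) {
 *             return_ACPI_STATUS(status);
 *     }
 *     // gpe_xrupt_block now heads the list of GPE blocks that share
 *     // this interrupt level.
 */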

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

        /* We never want to remove the SCI interrupt handler */

        if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
                gpe_xrupt->gpe_block_list_head = NULL;
                return_ACPI_STATUS(AE_OK);
        }

        /* Disable this interrupt */

        status =
            acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
                                             acpi_ev_gpe_xrupt_handler);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Unlink the interrupt block with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_xrupt->previous) {
                gpe_xrupt->previous->next = gpe_xrupt->next;
        } else {
                /* No previous, update list head */

                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
        }

        if (gpe_xrupt->next) {
                gpe_xrupt->next->previous = gpe_xrupt->previous;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Free the block */

        ACPI_FREE(gpe_xrupt);
        return_ACPI_STATUS(AE_OK);
}
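
/*
 * Usage note with a hedged sketch (an assumption about the callers, not
 * something defined in this module): a descriptor created by
 * acpi_ev_get_gpe_xrupt_block() is typically deleted only once the last GPE
 * block on that interrupt level has been removed, while the SCI descriptor
 * is never freed, only emptied, as handled above.
 *
 *     if (!gpe_block->previous && !gpe_block->next) {
 *             // Hypothetical caller: last block on this interrupt level
 *             status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
 *     }
 */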

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            struct acpi_gpe_block_info *gpe_block,
                            void *context)
{
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_gpe_notify_info *notify;
        struct acpi_gpe_notify_info *next;
        u32 i;
        u32 j;

        ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

        /* Examine each GPE Register within the block */

        for (i = 0; i < gpe_block->register_count; i++) {

                /* Now look at the individual GPEs in this byte register */

                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        gpe_event_info = &gpe_block->event_info[((acpi_size) i *
                                                                 ACPI_GPE_REGISTER_WIDTH)
                                                                + j];

                        if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
                             ACPI_GPE_DISPATCH_HANDLER) ||
                            (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
                             ACPI_GPE_DISPATCH_RAW_HANDLER)) {

                                /* Delete an installed handler block */

                                ACPI_FREE(gpe_event_info->dispatch.handler);
                                gpe_event_info->dispatch.handler = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
                        } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
                                   == ACPI_GPE_DISPATCH_NOTIFY) {

                                /* Delete the implicit notification device list */

                                notify = gpe_event_info->dispatch.notify_list;
                                while (notify) {
                                        next = notify->next;
                                        ACPI_FREE(notify);
                                        notify = next;
                                }

                                gpe_event_info->dispatch.notify_list = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
                        }
                }
        }

        return_ACPI_STATUS(AE_OK);
}
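
/*
 * Illustrative sketch (not part of this module): this routine is a
 * GPE_WALK_CALLBACK intended for subsystem termination, so it would be
 * applied to every GPE block through the list walker, for example:
 *
 *     status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 *
 * The context argument is unused by this callback, so NULL is sufficient.
 */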

#endif				/* !ACPI_REDUCED_HARDWARE */