v6.13.7
  1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2/******************************************************************************
  3 *
  4 * Module Name: evgpeutil - GPE utilities
  5 *
  6 * Copyright (C) 2000 - 2023, Intel Corp.
  7 *
  8 *****************************************************************************/
  9
 10#include <acpi/acpi.h>
 11#include "accommon.h"
 12#include "acevents.h"
 13
 14#define _COMPONENT          ACPI_EVENTS
 15ACPI_MODULE_NAME("evgpeutil")
 16
 17#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
 18/*******************************************************************************
 19 *
 20 * FUNCTION:    acpi_ev_walk_gpe_list
 21 *
 22 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 23 *              context             - Value passed to callback
 24 *
 25 * RETURN:      Status
 26 *
 27 * DESCRIPTION: Walk the GPE lists.
 28 *
 29 ******************************************************************************/
 30acpi_status
 31acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
 32{
 33	struct acpi_gpe_block_info *gpe_block;
 34	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
 35	acpi_status status = AE_OK;
 36	acpi_cpu_flags flags;
 37
 38	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
 39
 40	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 41
 42	/* Walk the interrupt level descriptor list */
 43
 44	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
 45	while (gpe_xrupt_info) {
 46
 47		/* Walk all Gpe Blocks attached to this interrupt level */
 48
 49		gpe_block = gpe_xrupt_info->gpe_block_list_head;
 50		while (gpe_block) {
 51
 52			/* One callback per GPE block */
 53
 54			status =
 55			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
 56					      context);
 57			if (ACPI_FAILURE(status)) {
 58				if (status == AE_CTRL_END) {	/* Callback abort */
 59					status = AE_OK;
 60				}
 61				goto unlock_and_exit;
 62			}
 63
 64			gpe_block = gpe_block->next;
 65		}
 66
 67		gpe_xrupt_info = gpe_xrupt_info->next;
 68	}
 69
 70unlock_and_exit:
 71	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 72	return_ACPI_STATUS(status);
 73}
 74
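/*
 * Illustrative sketch, not part of evgpeutil.c: one plausible way a caller
 * could use acpi_ev_walk_gpe_list(). The callback name (count_gpe_blocks)
 * and the helper below are hypothetical; the walker guarantees one callback
 * per GPE block and stops early when the callback returns AE_CTRL_END or
 * any failure code.
 */
static acpi_status
count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		 struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 *count = context;

	(*count)++;		/* One increment per GPE block visited */
	return (AE_OK);		/* AE_CTRL_END would stop the walk early */
}

static u32 example_count_gpe_blocks(void)
{
	u32 block_count = 0;

	(void)acpi_ev_walk_gpe_list(count_gpe_blocks, &block_count);
	return (block_count);
}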
 75/*******************************************************************************
 76 *
 77 * FUNCTION:    acpi_ev_get_gpe_device
 78 *
 79 * PARAMETERS:  GPE_WALK_CALLBACK
 80 *
 81 * RETURN:      Status
 82 *
 83 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 84 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 85 *
 86 ******************************************************************************/
 87
 88acpi_status
 89acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 90		       struct acpi_gpe_block_info *gpe_block, void *context)
 91{
 92	struct acpi_gpe_device_info *info = context;
 93
 94	/* Increment Index by the number of GPEs in this block */
 95
 96	info->next_block_base_index += gpe_block->gpe_count;
 97
 98	if (info->index < info->next_block_base_index) {
 99		/*
100		 * The GPE index is within this block, get the node. Leave the node
101		 * NULL for the FADT-defined GPEs
102		 */
103		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
104			info->gpe_device = gpe_block->node;
105		}
106
107		info->status = AE_OK;
108		return (AE_CTRL_END);
109	}
110
111	return (AE_OK);
112}
113
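/*
 * Illustrative sketch, not part of evgpeutil.c: acpi_ev_get_gpe_device() is
 * written as a GPE_WALK_CALLBACK, so a caller resolves an absolute GPE index
 * by handing it to acpi_ev_walk_gpe_list() together with an
 * acpi_gpe_device_info context, roughly as the public acpi_get_gpe_device()
 * interface does. The helper name below is hypothetical.
 */
static struct acpi_namespace_node *example_lookup_gpe_device(u32 index)
{
	struct acpi_gpe_device_info info;

	info.index = index;
	info.status = AE_NOT_EXIST;
	info.gpe_device = NULL;
	info.next_block_base_index = 0;

	(void)acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);

	/*
	 * NULL here means either the index was out of range or it landed in
	 * one of the FADT-defined GPE blocks (which have no block device)
	 */
	return (ACPI_SUCCESS(info.status) ? info.gpe_device : NULL);
}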
114/*******************************************************************************
115 *
116 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
117 *
118 * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
119 *              gpe_xrupt_block             - Where the block is returned
120 *
121 * RETURN:      Status
122 *
123 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
124 *              block per unique interrupt level used for GPEs. Should be
125 *              called only when the GPE lists are semaphore locked and not
126 *              subject to change.
127 *
128 ******************************************************************************/
129
130acpi_status
131acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
132			    struct acpi_gpe_xrupt_info **gpe_xrupt_block)
133{
134	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
135	struct acpi_gpe_xrupt_info *gpe_xrupt;
136	acpi_status status;
137	acpi_cpu_flags flags;
138
139	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
140
141	/* No need for lock since we are not changing any list elements here */
142
143	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
144	while (next_gpe_xrupt) {
145		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
146			*gpe_xrupt_block = next_gpe_xrupt;
147			return_ACPI_STATUS(AE_OK);
148		}
149
150		next_gpe_xrupt = next_gpe_xrupt->next;
151	}
152
153	/* Not found, must allocate a new xrupt descriptor */
154
155	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
156	if (!gpe_xrupt) {
157		return_ACPI_STATUS(AE_NO_MEMORY);
158	}
159
160	gpe_xrupt->interrupt_number = interrupt_number;
161
162	/* Install new interrupt descriptor with spin lock */
163
164	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
165	if (acpi_gbl_gpe_xrupt_list_head) {
166		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
167		while (next_gpe_xrupt->next) {
168			next_gpe_xrupt = next_gpe_xrupt->next;
169		}
170
171		next_gpe_xrupt->next = gpe_xrupt;
172		gpe_xrupt->previous = next_gpe_xrupt;
173	} else {
174		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
175	}
176
177	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
178
179	/* Install new interrupt handler if not SCI_INT */
180
181	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
182		status = acpi_os_install_interrupt_handler(interrupt_number,
183							   acpi_ev_gpe_xrupt_handler,
184							   gpe_xrupt);
185		if (ACPI_FAILURE(status)) {
186			ACPI_EXCEPTION((AE_INFO, status,
187					"Could not install GPE interrupt handler at level 0x%X",
188					interrupt_number));
189			return_ACPI_STATUS(status);
190		}
191	}
192
193	*gpe_xrupt_block = gpe_xrupt;
194	return_ACPI_STATUS(AE_OK);
195}
196
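/*
 * Illustrative sketch, not part of evgpeutil.c: typical get-or-create use of
 * acpi_ev_get_gpe_xrupt_block() when a GPE block is being installed on an
 * interrupt line, paired with acpi_ev_delete_gpe_xrupt() on the teardown
 * path. Function and variable names are illustrative.
 */
static acpi_status example_bind_block_to_interrupt(u32 interrupt_number)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;

	/* Reuses an existing descriptor for this interrupt, or allocates one */

	status = acpi_ev_get_gpe_xrupt_block(interrupt_number,
					     &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ...link the new GPE block onto gpe_xrupt_block->gpe_block_list_head... */

	return (AE_OK);
}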
197/*******************************************************************************
198 *
199 * FUNCTION:    acpi_ev_delete_gpe_xrupt
200 *
201 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
202 *
203 * RETURN:      Status
204 *
205 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
206 *              interrupt handler if not the SCI interrupt.
207 *
208 ******************************************************************************/
209
210acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
211{
212	acpi_status status;
213	acpi_cpu_flags flags;
214
215	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
216
217	/* We never want to remove the SCI interrupt handler */
218
219	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
220		gpe_xrupt->gpe_block_list_head = NULL;
221		return_ACPI_STATUS(AE_OK);
222	}
223
224	/* Disable this interrupt */
225
226	status =
227	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
228					     acpi_ev_gpe_xrupt_handler);
229	if (ACPI_FAILURE(status)) {
230		return_ACPI_STATUS(status);
231	}
232
233	/* Unlink the interrupt block with lock */
234
235	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
236	if (gpe_xrupt->previous) {
237		gpe_xrupt->previous->next = gpe_xrupt->next;
238	} else {
239		/* No previous, update list head */
240
241		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
242	}
243
244	if (gpe_xrupt->next) {
245		gpe_xrupt->next->previous = gpe_xrupt->previous;
246	}
247	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
248
249	/* Free the block */
250
251	ACPI_FREE(gpe_xrupt);
252	return_ACPI_STATUS(AE_OK);
253}
254
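/*
 * Illustrative sketch, not part of evgpeutil.c: a teardown path would
 * normally unlink its GPE block first and only call
 * acpi_ev_delete_gpe_xrupt() once no blocks remain on that interrupt; for
 * the SCI interrupt the function just clears the block list head and keeps
 * the handler installed. The helper name is illustrative.
 */
static void example_teardown_interrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	if (!gpe_xrupt->gpe_block_list_head) {

		/* Last GPE block on this interrupt is gone */

		(void)acpi_ev_delete_gpe_xrupt(gpe_xrupt);
	}
}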
255/*******************************************************************************
256 *
257 * FUNCTION:    acpi_ev_delete_gpe_handlers
258 *
259 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
260 *              gpe_block           - Gpe Block info
261 *
262 * RETURN:      Status
263 *
264 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
265 *              Used only prior to termination.
266 *
267 ******************************************************************************/
268
269acpi_status
270acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
271			    struct acpi_gpe_block_info *gpe_block,
272			    void *context)
273{
274	struct acpi_gpe_event_info *gpe_event_info;
275	struct acpi_gpe_notify_info *notify;
276	struct acpi_gpe_notify_info *next;
277	u32 i;
278	u32 j;
279
280	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
281
282	/* Examine each GPE Register within the block */
283
284	for (i = 0; i < gpe_block->register_count; i++) {
285
286		/* Now look at the individual GPEs in this byte register */
287
288		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
289			gpe_event_info = &gpe_block->event_info[((acpi_size)i *
290								 ACPI_GPE_REGISTER_WIDTH)
291								+ j];
292
293			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
294			     ACPI_GPE_DISPATCH_HANDLER) ||
295			    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
296			     ACPI_GPE_DISPATCH_RAW_HANDLER)) {
297
298				/* Delete an installed handler block */
299
300				ACPI_FREE(gpe_event_info->dispatch.handler);
301				gpe_event_info->dispatch.handler = NULL;
302				gpe_event_info->flags &=
303				    ~ACPI_GPE_DISPATCH_MASK;
304			} else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
305				   == ACPI_GPE_DISPATCH_NOTIFY) {
306
307				/* Delete the implicit notification device list */
308
309				notify = gpe_event_info->dispatch.notify_list;
310				while (notify) {
311					next = notify->next;
312					ACPI_FREE(notify);
313					notify = next;
314				}
315
316				gpe_event_info->dispatch.notify_list = NULL;
317				gpe_event_info->flags &=
318				    ~ACPI_GPE_DISPATCH_MASK;
319			}
320		}
321	}
322
323	return_ACPI_STATUS(AE_OK);
324}
325
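/*
 * Illustrative sketch, not part of evgpeutil.c: since this routine has the
 * GPE_WALK_CALLBACK signature, shutdown code can apply it to every GPE block
 * in a single pass, roughly as the ACPICA termination path does. Note that
 * the ((i * ACPI_GPE_REGISTER_WIDTH) + j) indexing above converts a
 * (register, bit) pair into the flat event_info[] slot for that GPE.
 */
static void example_delete_all_gpe_handlers(void)
{
	/* Walk every GPE block and drop its handler/notify objects */

	(void)acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
}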
326#endif				/* !ACPI_REDUCED_HARDWARE */
v3.1
  1/******************************************************************************
  2 *
  3 * Module Name: evgpeutil - GPE utilities
  4 *
  5 *****************************************************************************/
  6
  7/*
  8 * Copyright (C) 2000 - 2011, Intel Corp.
  9 * All rights reserved.
 10 *
 11 * Redistribution and use in source and binary forms, with or without
 12 * modification, are permitted provided that the following conditions
 13 * are met:
 14 * 1. Redistributions of source code must retain the above copyright
 15 *    notice, this list of conditions, and the following disclaimer,
 16 *    without modification.
 17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 18 *    substantially similar to the "NO WARRANTY" disclaimer below
 19 *    ("Disclaimer") and any redistribution must be conditioned upon
 20 *    including a substantially similar Disclaimer requirement for further
 21 *    binary redistribution.
 22 * 3. Neither the names of the above-listed copyright holders nor the names
 23 *    of any contributors may be used to endorse or promote products derived
 24 *    from this software without specific prior written permission.
 25 *
 26 * Alternatively, this software may be distributed under the terms of the
 27 * GNU General Public License ("GPL") version 2 as published by the Free
 28 * Software Foundation.
 29 *
 30 * NO WARRANTY
 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 41 * POSSIBILITY OF SUCH DAMAGES.
 42 */
 43
 44#include <acpi/acpi.h>
 45#include "accommon.h"
 46#include "acevents.h"
 47
 48#define _COMPONENT          ACPI_EVENTS
 49ACPI_MODULE_NAME("evgpeutil")
 50
 51/*******************************************************************************
 52 *
 53 * FUNCTION:    acpi_ev_walk_gpe_list
 54 *
 55 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 56 *              Context             - Value passed to callback
 57 *
 58 * RETURN:      Status
 59 *
 60 * DESCRIPTION: Walk the GPE lists.
 61 *
 62 ******************************************************************************/
 63acpi_status
 64acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
 65{
 66	struct acpi_gpe_block_info *gpe_block;
 67	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
 68	acpi_status status = AE_OK;
 69	acpi_cpu_flags flags;
 70
 71	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
 72
 73	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 74
 75	/* Walk the interrupt level descriptor list */
 76
 77	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
 78	while (gpe_xrupt_info) {
 79
 80		/* Walk all Gpe Blocks attached to this interrupt level */
 81
 82		gpe_block = gpe_xrupt_info->gpe_block_list_head;
 83		while (gpe_block) {
 84
 85			/* One callback per GPE block */
 86
 87			status =
 88			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
 89					      context);
 90			if (ACPI_FAILURE(status)) {
 91				if (status == AE_CTRL_END) {	/* Callback abort */
 92					status = AE_OK;
 93				}
 94				goto unlock_and_exit;
 95			}
 96
 97			gpe_block = gpe_block->next;
 98		}
 99
100		gpe_xrupt_info = gpe_xrupt_info->next;
101	}
102
103      unlock_and_exit:
104	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
105	return_ACPI_STATUS(status);
106}
107
108/*******************************************************************************
109 *
110 * FUNCTION:    acpi_ev_valid_gpe_event
111 *
112 * PARAMETERS:  gpe_event_info              - Info for this GPE
113 *
114 * RETURN:      TRUE if the gpe_event is valid
115 *
116 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
117 *              Should be called only when the GPE lists are semaphore locked
118 *              and not subject to change.
119 *
120 ******************************************************************************/
121
122u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
123{
124	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
125	struct acpi_gpe_block_info *gpe_block;
126
127	ACPI_FUNCTION_ENTRY();
128
129	/* No need for spin lock since we are not changing any list elements */
130
131	/* Walk the GPE interrupt levels */
132
133	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
134	while (gpe_xrupt_block) {
135		gpe_block = gpe_xrupt_block->gpe_block_list_head;
136
137		/* Walk the GPE blocks on this interrupt level */
138
139		while (gpe_block) {
140			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
141			    (&gpe_block->event_info[gpe_block->gpe_count] >
142			     gpe_event_info)) {
143				return (TRUE);
144			}
145
146			gpe_block = gpe_block->next;
147		}
148
149		gpe_xrupt_block = gpe_xrupt_block->next;
150	}
151
152	return (FALSE);
153}
154
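/*
 * Illustrative sketch, not part of evgpeutil.c (this helper exists only in
 * the older ACPICA version shown here): the check is a simple address-range
 * test, so a caller holding a possibly stale struct acpi_gpe_event_info
 * pointer can confirm it still lies inside some block's event_info[] array
 * before dereferencing it. The helper name is illustrative.
 */
static acpi_status
example_use_event(struct acpi_gpe_event_info *gpe_event_info)
{
	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		return (AE_BAD_PARAMETER);
	}

	/* ...safe to use gpe_event_info here... */

	return (AE_OK);
}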
155/*******************************************************************************
156 *
157 * FUNCTION:    acpi_ev_get_gpe_device
158 *
159 * PARAMETERS:  GPE_WALK_CALLBACK
160 *
161 * RETURN:      Status
162 *
163 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
164 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
165 *
166 ******************************************************************************/
167
168acpi_status
169acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
170		       struct acpi_gpe_block_info *gpe_block, void *context)
171{
172	struct acpi_gpe_device_info *info = context;
173
174	/* Increment Index by the number of GPEs in this block */
175
176	info->next_block_base_index += gpe_block->gpe_count;
177
178	if (info->index < info->next_block_base_index) {
179		/*
180		 * The GPE index is within this block, get the node. Leave the node
181		 * NULL for the FADT-defined GPEs
182		 */
183		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
184			info->gpe_device = gpe_block->node;
185		}
186
187		info->status = AE_OK;
188		return (AE_CTRL_END);
189	}
190
191	return (AE_OK);
192}
193
194/*******************************************************************************
195 *
196 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
197 *
 198 *              interrupt_number     - Interrupt for a GPE block
199 *
200 * RETURN:      A GPE interrupt block
201 *
202 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
203 *              block per unique interrupt level used for GPEs. Should be
204 *              called only when the GPE lists are semaphore locked and not
205 *              subject to change.
206 *
207 ******************************************************************************/
208
209struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
210{
211	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
212	struct acpi_gpe_xrupt_info *gpe_xrupt;
213	acpi_status status;
214	acpi_cpu_flags flags;
215
216	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
217
218	/* No need for lock since we are not changing any list elements here */
219
220	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
221	while (next_gpe_xrupt) {
222		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
223			return_PTR(next_gpe_xrupt);
224		}
225
226		next_gpe_xrupt = next_gpe_xrupt->next;
227	}
228
229	/* Not found, must allocate a new xrupt descriptor */
230
231	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
232	if (!gpe_xrupt) {
233		return_PTR(NULL);
234	}
235
236	gpe_xrupt->interrupt_number = interrupt_number;
237
238	/* Install new interrupt descriptor with spin lock */
239
240	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
241	if (acpi_gbl_gpe_xrupt_list_head) {
242		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
243		while (next_gpe_xrupt->next) {
244			next_gpe_xrupt = next_gpe_xrupt->next;
245		}
246
247		next_gpe_xrupt->next = gpe_xrupt;
248		gpe_xrupt->previous = next_gpe_xrupt;
249	} else {
250		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
251	}
252	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
253
254	/* Install new interrupt handler if not SCI_INT */
255
256	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
257		status = acpi_os_install_interrupt_handler(interrupt_number,
258							   acpi_ev_gpe_xrupt_handler,
259							   gpe_xrupt);
260		if (ACPI_FAILURE(status)) {
261			ACPI_ERROR((AE_INFO,
262				    "Could not install GPE interrupt handler at level 0x%X",
263				    interrupt_number));
264			return_PTR(NULL);
265		}
266	}
267
268	return_PTR(gpe_xrupt);
269}
270
271/*******************************************************************************
272 *
273 * FUNCTION:    acpi_ev_delete_gpe_xrupt
274 *
275 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
276 *
277 * RETURN:      Status
278 *
279 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
280 *              interrupt handler if not the SCI interrupt.
281 *
282 ******************************************************************************/
283
284acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
285{
286	acpi_status status;
287	acpi_cpu_flags flags;
288
289	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
290
291	/* We never want to remove the SCI interrupt handler */
292
293	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
294		gpe_xrupt->gpe_block_list_head = NULL;
295		return_ACPI_STATUS(AE_OK);
296	}
297
298	/* Disable this interrupt */
299
300	status =
301	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
302					     acpi_ev_gpe_xrupt_handler);
303	if (ACPI_FAILURE(status)) {
304		return_ACPI_STATUS(status);
305	}
306
307	/* Unlink the interrupt block with lock */
308
309	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
310	if (gpe_xrupt->previous) {
311		gpe_xrupt->previous->next = gpe_xrupt->next;
312	} else {
313		/* No previous, update list head */
314
315		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
316	}
317
318	if (gpe_xrupt->next) {
319		gpe_xrupt->next->previous = gpe_xrupt->previous;
320	}
321	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
322
323	/* Free the block */
324
325	ACPI_FREE(gpe_xrupt);
326	return_ACPI_STATUS(AE_OK);
327}
328
329/*******************************************************************************
330 *
331 * FUNCTION:    acpi_ev_delete_gpe_handlers
332 *
333 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
334 *              gpe_block           - Gpe Block info
335 *
336 * RETURN:      Status
337 *
338 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
339 *              Used only prior to termination.
340 *
341 ******************************************************************************/
342
343acpi_status
344acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
345			    struct acpi_gpe_block_info *gpe_block,
346			    void *context)
347{
348	struct acpi_gpe_event_info *gpe_event_info;
349	u32 i;
350	u32 j;
351
352	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
353
354	/* Examine each GPE Register within the block */
355
356	for (i = 0; i < gpe_block->register_count; i++) {
357
358		/* Now look at the individual GPEs in this byte register */
359
360		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
361			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
362								 ACPI_GPE_REGISTER_WIDTH)
363								+ j];
364
365			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
366			    ACPI_GPE_DISPATCH_HANDLER) {
367				ACPI_FREE(gpe_event_info->dispatch.handler);
368				gpe_event_info->dispatch.handler = NULL;
369				gpe_event_info->flags &=
370				    ~ACPI_GPE_DISPATCH_MASK;
371			}
372		}
373	}
374
375	return_ACPI_STATUS(AE_OK);
376}