// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeinit - System GPE initialization and update
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*
 * Note: History of _PRW support in ACPICA
 *
 * Originally (2000 - 2010), the GPE initialization code performed a walk of
 * the entire namespace to execute the _PRW methods and detect all GPEs
 * capable of waking the system.
 *
 * As of 10/2010, the _PRW method execution has been removed since it is
 * actually unnecessary. The host OS must in fact execute all _PRW methods
 * in order to identify the device/power-resource dependencies. We now put
 * the onus on the host OS to identify the wake GPEs as part of this process
 * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake
 * interface. This not only reduces the complexity of the ACPICA
 * initialization code, but in some cases (on systems with very large
 * namespaces) it should reduce the kernel boot time as well.
 */
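
/*
 * Illustrative sketch (not part of ACPICA): a host OS that has evaluated a
 * device's _PRW would typically report the wake GPE it found like this. The
 * wake_device handle and GPE number 0x1D are hypothetical; a real host takes
 * them from the _PRW return package. A NULL gpe_device selects the
 * FADT-defined GPE blocks.
 *
 *	status = acpi_setup_gpe_for_wake(wake_device, NULL, 0x1D);
 *	if (ACPI_FAILURE(status)) {
 *		ACPI_EXCEPTION((AE_INFO, status,
 *				"Could not set up wake GPE"));
 *	}
 */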

#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	\
	acpi_gbl_FADT.xgpe##N##_block.space_id == \
					ACPI_ADR_SPACE_SYSTEM_MEMORY ? \
		(u64)acpi_gbl_xgpe##N##_block_logical_address : \
			acpi_gbl_FADT.xgpe##N##_block.address
#else
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	acpi_gbl_FADT.xgpe##N##_block.address
#endif				/* ACPI_GPE_USE_LOGICAL_ADDRESSES */
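
/*
 * For example, with ACPI_GPE_USE_LOGICAL_ADDRESSES defined,
 * ACPI_FADT_GPE_BLOCK_ADDRESS(0) expands (via ## token pasting) to:
 *
 *	acpi_gbl_FADT.xgpe0_block.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY ?
 *		(u64)acpi_gbl_xgpe0_block_logical_address :
 *		acpi_gbl_FADT.xgpe0_block.address
 *
 * i.e. the mapped logical address is used when the GPE block lives in
 * system memory, and the raw FADT address is used otherwise.
 */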

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
 *
 ******************************************************************************/
acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;
	u64 address;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing General Purpose Events (GPEs):\n"));

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2: From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 * The length of the GPE1_STS and GPE1_EN registers is equal to
	 * half the GPE1_LEN. If a generic register block is not supported
	 * then its respective block pointer and block length values in the
	 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 * to be the same size."
	 */
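
	/*
	 * Worked example (hypothetical values): a gpe0_block_length of 8
	 * bytes means a 4-byte GPE0_STS and a 4-byte GPE0_EN register, so
	 * register_count0 = 8 / 2 = 4 and GPE block 0 covers GPE numbers 0
	 * through (4 * ACPI_GPE_REGISTER_WIDTH) - 1 = 31.
	 */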

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	address = ACPI_FADT_GPE_BLOCK_ADDRESS(0);

	if (acpi_gbl_FADT.gpe0_block_length && address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  address,
						  acpi_gbl_FADT.xgpe0_block.
						  space_id, register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	address = ACPI_FADT_GPE_BLOCK_ADDRESS(1);

	if (acpi_gbl_FADT.gpe1_block_length && address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     address,
						     acpi_gbl_FADT.xgpe1_block.
						     space_id, register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.
						     sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks
						     [1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		goto cleanup;
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpes
 *
 * PARAMETERS:  table_owner_id      - ID of the newly-loaded ACPI table
 *
 * RETURN:      None
 *
 * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
 *              result of a Load() or load_table() operation. If new GPE
 *              methods have been installed, register the new methods.
 *
 ******************************************************************************/

void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;

	/*
	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
	 *
	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
	 * enabled.
	 *
	 * Examine the namespace underneath each gpe_device within the
	 * gpe_block lists.
	 */
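
	/*
	 * Illustrative ASL (not part of this module): a newly loaded SSDT
	 * might add a GPE method such as
	 *
	 *	Scope (\_GPE) {
	 *		Method (_L19) { Notify (\_SB.DEV0, 0x02) }
	 *	}
	 *
	 * where \_SB.DEV0 is a hypothetical device. The walk below finds
	 * _L19 and registers it as the method for level-triggered GPE 0x19.
	 */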
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	walk_info.count = 0;
	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	if (walk_info.count) {
		ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_gpe_method
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch. Allows a
 *              per-owner_id evaluation if execute_by_owner_id is TRUE in the
 *              walk_info parameter block.
 *
 * The name of each GPE control method is of the form:
 *      "_Lxx" or "_Exx", where:
 *      L      - means that the GPE is level triggered
 *      E      - means that the GPE is edge triggered
 *      xx     - is the GPE number [in HEX]
 *
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
 * with that owner.
 *
 ******************************************************************************/
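
/*
 * For example, a method named "_L1E" (hypothetical) decodes as a
 * level-triggered GPE: name[1] == 'L' selects ACPI_GPE_LEVEL_TRIGGERED, and
 * acpi_ut_ascii_to_hex_byte() converts the trailing "1E" into GPE number 0x1E.
 */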

acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value)
{
	struct acpi_namespace_node *method_node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	u32 gpe_number;
	u8 temp_gpe_number;
	char name[ACPI_NAMESEG_SIZE + 1];
	u8 type;

	ACPI_FUNCTION_TRACE(ev_match_gpe_method);

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (method_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Match and decode the _Lxx and _Exx GPE method names
	 *
	 * 1) Extract the method name and null terminate it
	 */
	ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
	name[ACPI_NAMESEG_SIZE] = 0;

	/* 2) Name must begin with an underscore */

	if (name[0] != '_') {
		return_ACPI_STATUS(AE_OK);	/* Ignore this method */
	}

	/*
	 * 3) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 */
	switch (name[1]) {
	case 'L':

		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':

		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:

		/* Unknown method type, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* 4) The last two characters of the name are the hex GPE Number */

	status = acpi_ut_ascii_to_hex_byte(&name[2], &temp_gpe_number);
	if (ACPI_FAILURE(status)) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	gpe_number = (u32)temp_gpe_number;
	gpe_event_info =
	    acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
	if (!gpe_event_info) {
		/*
		 * This gpe_number is not valid for this GPE block, just ignore it.
		 * However, it may be valid for a different GPE block, since GPE0
		 * and GPE1 methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_HANDLER) ||
	    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

		/* If there is already a handler, ignore this GPE method */

		return_ACPI_STATUS(AE_OK);
	}

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_METHOD) {
		/*
		 * If there is already a method, ignore this method. But check
		 * for a type mismatch (if both the _Lxx AND _Exx exist)
		 */
		if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
			ACPI_ERROR((AE_INFO,
				    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
				    gpe_number, gpe_number, gpe_number));
		}
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable the GPE in case it's been enabled already. */

	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);

	/*
	 * Add the GPE information from above to the gpe_event_info block for
	 * use during dispatch of this GPE.
	 */
	gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
	gpe_event_info->dispatch.method_node = method_node;

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}

#endif				/* !ACPI_REDUCED_HARDWARE */