// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 * Copyright (C) 2000 - 2019, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
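
/*
 * Illustrative sketch (not part of ACPICA): after two calls to
 * acpi_ev_install_gpe_block() for the same interrupt_number, the
 * per-interrupt structures are linked roughly like this:
 *
 *     gpe_xrupt_block->gpe_block_list_head --> block_A <--> block_B
 *     block_A->xrupt_block == block_B->xrupt_block == gpe_xrupt_block
 *
 * New blocks are always appended at the tail while acpi_gbl_gpe_lock is
 * held; within this file the only caller is acpi_ev_create_gpe_block().
 */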

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}

		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
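
/*
 * Hedged usage sketch (assumption, modeled loosely on the public
 * acpi_remove_gpe_block() path in evxfgpe.c; exact details may differ
 * between ACPICA versions):
 *
 *     union acpi_operand_object *obj_desc;
 *
 *     obj_desc = acpi_ns_get_attached_object(node);
 *     if (obj_desc && obj_desc->device.gpe_block) {
 *         status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
 *         if (ACPI_SUCCESS(status)) {
 *             obj_desc->device.gpe_block = NULL;
 *         }
 *     }
 *
 * The block unlinks itself from its interrupt list, releases the xrupt
 * structure if it was the only block on that interrupt, and frees its
 * register_info/event_info arrays.
 */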

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block, the status registers occupy the
	 * first half, and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u16)
		    (gpe_block->block_base_number +
		     (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address = gpe_block->address + i;

		this_register->enable_address.address =
		    gpe_block->address + i + gpe_block->register_count;

		this_register->status_address.space_id = gpe_block->space_id;
		this_register->enable_address.space_id = gpe_block->space_id;
		this_register->status_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.bit_offset = 0;
		this_register->enable_address.bit_offset = 0;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}
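
/*
 * Worked example (derived from the address math above, values are
 * illustrative only): for a GPE block with block_base_number 0x00,
 * address 0x1000, space_id ACPI_ADR_SPACE_SYSTEM_IO and register_count 2
 * (i.e. 16 GPEs, 0x00-0x0F):
 *
 *     register 0: base_gpe_number 0x00, status at 0x1000, enable at 0x1002
 *     register 1: base_gpe_number 0x08, status at 0x1001, enable at 0x1003
 *
 * The status registers occupy the first register_count addresses of the
 * block and the enable registers the second, which is exactly the split
 * the comment above describes.
 */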

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              address             - Address of the GPE block registers
 *              space_id            - Address space ID for the block
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->address = address;
	gpe_block->space_id = space_id;
	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number,
			      interrupt_number ==
			      acpi_gbl_FADT.sci_interrupt ? " (SCI)" : ""));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}
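
/*
 * Hedged usage sketch (assumption, modeled on how the FADT GPE blocks are
 * created in evgpeinit.c; the variable names register_count0 and the exact
 * call site may differ between ACPICA versions):
 *
 *     status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *                                       acpi_gbl_FADT.xgpe0_block.address,
 *                                       acpi_gbl_FADT.xgpe0_block.space_id,
 *                                       register_count0, 0,
 *                                       acpi_gbl_FADT.sci_interrupt,
 *                                       &acpi_gbl_gpe_fadt_blocks[0]);
 *
 * All GPEs in the new block are left disabled here; they are enabled later
 * by acpi_ev_initialize_gpe_block() or explicitly via acpi_enable_gpe().
 */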

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  acpi_gpe_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
 *              associated methods.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 i;
	u32 j;
	u8 *is_polling_needed = context;
	ACPI_ERROR_ONLY(u32 gpe_number);

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/*
	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
	 * any GPE blocks that have been initialized already.
	 */
	if (!gpe_block || gpe_block->initialized) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method and have the
	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			ACPI_ERROR_ONLY(gpe_number =
					gpe_block->block_base_number +
					gpe_index);
			gpe_event_info->flags |= ACPI_GPE_INITIALIZED;

			/*
			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
			 * and GPEs that are used for wakeup
			 */
			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
			     ACPI_GPE_DISPATCH_METHOD)
			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
				continue;
			}

			status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
				continue;
			}

			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;

			if (is_polling_needed &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
				*is_polling_needed = TRUE;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
			   gpe_enabled_count,
			   (u32)gpe_block->block_base_number,
			   (u32)(gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1))));
	}

	gpe_block->initialized = TRUE;

	return_ACPI_STATUS(AE_OK);
}
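
/*
 * Hedged usage sketch (assumption, based on the acpi_update_all_gpes()
 * path in evxfgpe.c; the exact wrapper may differ): this function is a
 * GPE-list callback and is normally applied to every block via
 *
 *     u8 is_polling_needed = FALSE;
 *
 *     status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block,
 *                                    &is_polling_needed);
 *
 * after the namespace has been loaded, so that every GPE with an
 * _Lxx/_Exx method (and without ACPI_GPE_CAN_WAKE set) gets an initial
 * enable reference.
 */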

#endif				/* !ACPI_REDUCED_HARDWARE */