1/*
2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3 *
4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/highmem.h>
31#include <linux/pci.h>
32#include <linux/interrupt.h>
33#include <linux/kmod.h>
34#include <linux/delay.h>
35#include <linux/workqueue.h>
36#include <linux/nmi.h>
37#include <linux/acpi.h>
38#include <linux/efi.h>
39#include <linux/ioport.h>
40#include <linux/list.h>
41#include <linux/jiffies.h>
42#include <linux/semaphore.h>
43
44#include <asm/io.h>
45#include <asm/uaccess.h>
46#include <linux/io-64-nonatomic-lo-hi.h>
47
48#include "internal.h"
49
50#define _COMPONENT ACPI_OS_SERVICES
51ACPI_MODULE_NAME("osl");
52
53struct acpi_os_dpc {
54 acpi_osd_exec_callback function;
55 void *context;
56 struct work_struct work;
57};
58
59#ifdef CONFIG_ACPI_CUSTOM_DSDT
60#include CONFIG_ACPI_CUSTOM_DSDT_FILE
61#endif
62
63#ifdef ENABLE_DEBUGGER
64#include <linux/kdb.h>
65
66/* stuff for debugger support */
67int acpi_in_debugger;
68EXPORT_SYMBOL(acpi_in_debugger);
69#endif /*ENABLE_DEBUGGER */
70
71static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
72 u32 pm1b_ctrl);
73static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
74 u32 val_b);
75
76static acpi_osd_handler acpi_irq_handler;
77static void *acpi_irq_context;
78static struct workqueue_struct *kacpid_wq;
79static struct workqueue_struct *kacpi_notify_wq;
80static struct workqueue_struct *kacpi_hotplug_wq;
81static bool acpi_os_initialized;
82unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
83
84/*
85 * This list of permanent mappings is for memory that may be accessed from
86 * interrupt context, where we can't do the ioremap().
87 */
88struct acpi_ioremap {
89 struct list_head list;
90 void __iomem *virt;
91 acpi_physical_address phys;
92 acpi_size size;
93 unsigned long refcount;
94};
95
96static LIST_HEAD(acpi_ioremaps);
97static DEFINE_MUTEX(acpi_ioremap_lock);
98
99static void __init acpi_osi_setup_late(void);
100
101/*
102 * The story of _OSI(Linux)
103 *
104 * From pre-history through Linux-2.6.22,
105 * Linux responded TRUE upon a BIOS OSI(Linux) query.
106 *
107 * Unfortunately, reference BIOS writers got wind of this
108 * and put OSI(Linux) in their example code, quickly exposing
109 * this string as ill-conceived and opening the door to
110 * an unbounded number of BIOS incompatibilities.
111 *
112 * For example, OSI(Linux) was used on resume to re-POST a
113 * video card on one system, because Linux at that time
114 * could not do a speedy restore in its native driver.
115 * But then upon gaining quick native restore capability,
116 * Linux has no way to tell the BIOS to skip the time-consuming
117 * POST -- putting Linux at a permanent performance disadvantage.
118 * On another system, the BIOS writer used OSI(Linux)
119 * to infer native OS support for IPMI! On other systems,
120 * OSI(Linux) simply got in the way of Linux claiming to
121 * be compatible with other operating systems, exposing
122 * BIOS issues such as skipped device initialization.
123 *
124 * So "Linux" turned out to be a really poor choice of
125 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
126 *
127 * BIOS writers should NOT query _OSI(Linux) on future systems.
128 * Linux will complain on the console when it sees it, and return FALSE.
129 * To get Linux to return TRUE for your system will require
130 * a kernel source update to add a DMI entry,
131 * or boot with "acpi_osi=Linux"
132 */
133
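/*
 * An illustrative sketch of the two opt-in paths mentioned above; the
 * vendor/product strings are hypothetical. Either boot with:
 *
 *    acpi_osi=Linux
 *
 * or add a DMI quirk entry whose callback invokes acpi_dmi_osi_linux()
 * (defined later in this file), e.g.:
 *
 *    static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *    {
 *            acpi_dmi_osi_linux(1, d);       // answer TRUE to _OSI(Linux)
 *            return 0;
 *    }
 *
 *    {
 *            .callback = dmi_enable_osi_linux,
 *            .ident = "Some Laptop",         // hypothetical
 *            .matches = {
 *                    DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *                    DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
 *            },
 *    },
 */
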
134static struct osi_linux {
135 unsigned int enable:1;
136 unsigned int dmi:1;
137 unsigned int cmdline:1;
138 unsigned int default_disabling:1;
139} osi_linux = {0, 0, 0, 0};
140
141static u32 acpi_osi_handler(acpi_string interface, u32 supported)
142{
143 if (!strcmp("Linux", interface)) {
144
145 printk_once(KERN_NOTICE FW_BUG PREFIX
146 "BIOS _OSI(Linux) query %s%s\n",
147 osi_linux.enable ? "honored" : "ignored",
148 osi_linux.cmdline ? " via cmdline" :
149 osi_linux.dmi ? " via DMI" : "");
150 }
151
152 if (!strcmp("Darwin", interface)) {
153 /*
154 * Apple firmware will behave poorly if it receives positive
155 * answers to "Darwin" and any other OS. Respond positively
156 * to Darwin and then disable all other vendor strings.
157 */
158 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
159 supported = ACPI_UINT32_MAX;
160 }
161
162 return supported;
163}
164
165static void __init acpi_request_region (struct acpi_generic_address *gas,
166 unsigned int length, char *desc)
167{
168 u64 addr;
169
170 /* Handle possible alignment issues */
171 memcpy(&addr, &gas->address, sizeof(addr));
172 if (!addr || !length)
173 return;
174
175 /* Resources are never freed */
176 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
177 request_region(addr, length, desc);
178 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
179 request_mem_region(addr, length, desc);
180}
181
182static int __init acpi_reserve_resources(void)
183{
184 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
185 "ACPI PM1a_EVT_BLK");
186
187 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
188 "ACPI PM1b_EVT_BLK");
189
190 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
191 "ACPI PM1a_CNT_BLK");
192
193 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
194 "ACPI PM1b_CNT_BLK");
195
196 if (acpi_gbl_FADT.pm_timer_length == 4)
197 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
198
199 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
200 "ACPI PM2_CNT_BLK");
201
202 /* Length of GPE blocks must be a non-negative multiple of 2 */
203
204 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
205 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
206 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
207
208 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
209 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
210 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
211
212 return 0;
213}
214fs_initcall_sync(acpi_reserve_resources);
215
216void acpi_os_printf(const char *fmt, ...)
217{
218 va_list args;
219 va_start(args, fmt);
220 acpi_os_vprintf(fmt, args);
221 va_end(args);
222}
223EXPORT_SYMBOL(acpi_os_printf);
224
225void acpi_os_vprintf(const char *fmt, va_list args)
226{
227 static char buffer[512];
228
229 vsprintf(buffer, fmt, args);
230
231#ifdef ENABLE_DEBUGGER
232 if (acpi_in_debugger) {
233 kdb_printf("%s", buffer);
234 } else {
235 printk(KERN_CONT "%s", buffer);
236 }
237#else
238 if (acpi_debugger_write_log(buffer) < 0)
239 printk(KERN_CONT "%s", buffer);
240#endif
241}
242
243#ifdef CONFIG_KEXEC
244static unsigned long acpi_rsdp;
245static int __init setup_acpi_rsdp(char *arg)
246{
247 if (kstrtoul(arg, 16, &acpi_rsdp))
248 return -EINVAL;
249 return 0;
250}
251early_param("acpi_rsdp", setup_acpi_rsdp);
252#endif
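
/*
 * An illustrative use of the parameter parsed above: a kexec'ed kernel
 * (e.g. a kdump kernel on an EFI system) that cannot rediscover the RSDP
 * on its own can be told where it lives. The address below is
 * hypothetical:
 *
 *    acpi_rsdp=0x7fe86000
 */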
253
254acpi_physical_address __init acpi_os_get_root_pointer(void)
255{
256#ifdef CONFIG_KEXEC
257 if (acpi_rsdp)
258 return acpi_rsdp;
259#endif
260
261 if (efi_enabled(EFI_CONFIG_TABLES)) {
262 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
263 return efi.acpi20;
264 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
265 return efi.acpi;
266 else {
267 printk(KERN_ERR PREFIX
268 "System description tables not found\n");
269 return 0;
270 }
271 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
272 acpi_physical_address pa = 0;
273
274 acpi_find_root_pointer(&pa);
275 return pa;
276 }
277
278 return 0;
279}
280
281/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
282static struct acpi_ioremap *
283acpi_map_lookup(acpi_physical_address phys, acpi_size size)
284{
285 struct acpi_ioremap *map;
286
287 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
288 if (map->phys <= phys &&
289 phys + size <= map->phys + map->size)
290 return map;
291
292 return NULL;
293}
294
295/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
296static void __iomem *
297acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
298{
299 struct acpi_ioremap *map;
300
301 map = acpi_map_lookup(phys, size);
302 if (map)
303 return map->virt + (phys - map->phys);
304
305 return NULL;
306}
307
308void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
309{
310 struct acpi_ioremap *map;
311 void __iomem *virt = NULL;
312
313 mutex_lock(&acpi_ioremap_lock);
314 map = acpi_map_lookup(phys, size);
315 if (map) {
316 virt = map->virt + (phys - map->phys);
317 map->refcount++;
318 }
319 mutex_unlock(&acpi_ioremap_lock);
320 return virt;
321}
322EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
323
324/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
325static struct acpi_ioremap *
326acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
327{
328 struct acpi_ioremap *map;
329
330 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
331 if (map->virt <= virt &&
332 virt + size <= map->virt + map->size)
333 return map;
334
335 return NULL;
336}
337
338#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
339/* ioremap will take care of cache attributes */
340#define should_use_kmap(pfn) 0
341#else
342#define should_use_kmap(pfn) page_is_ram(pfn)
343#endif
344
345static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
346{
347 unsigned long pfn;
348
349 pfn = pg_off >> PAGE_SHIFT;
350 if (should_use_kmap(pfn)) {
351 if (pg_sz > PAGE_SIZE)
352 return NULL;
353 return (void __iomem __force *)kmap(pfn_to_page(pfn));
354 } else
355 return acpi_os_ioremap(pg_off, pg_sz);
356}
357
358static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
359{
360 unsigned long pfn;
361
362 pfn = pg_off >> PAGE_SHIFT;
363 if (should_use_kmap(pfn))
364 kunmap(pfn_to_page(pfn));
365 else
366 iounmap(vaddr);
367}
368
369/**
370 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
371 * @phys: Start of the physical address range to map.
372 * @size: Size of the physical address range to map.
373 *
374 * Look up the given physical address range in the list of existing ACPI memory
375 * mappings. If found, get a reference to it and return a pointer to it (its
376 * virtual address). If not found, map it, add it to that list and return a
377 * pointer to it.
378 *
379 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
380 * routine simply calls __acpi_map_table() to get the job done.
381 */
382void __iomem *__init_refok
383acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
384{
385 struct acpi_ioremap *map;
386 void __iomem *virt;
387 acpi_physical_address pg_off;
388 acpi_size pg_sz;
389
390 if (phys > ULONG_MAX) {
391 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
392 return NULL;
393 }
394
395 if (!acpi_gbl_permanent_mmap)
396 return __acpi_map_table((unsigned long)phys, size);
397
398 mutex_lock(&acpi_ioremap_lock);
399 /* Check if there's a suitable mapping already. */
400 map = acpi_map_lookup(phys, size);
401 if (map) {
402 map->refcount++;
403 goto out;
404 }
405
406 map = kzalloc(sizeof(*map), GFP_KERNEL);
407 if (!map) {
408 mutex_unlock(&acpi_ioremap_lock);
409 return NULL;
410 }
411
412 pg_off = round_down(phys, PAGE_SIZE);
413 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
414 virt = acpi_map(pg_off, pg_sz);
415 if (!virt) {
416 mutex_unlock(&acpi_ioremap_lock);
417 kfree(map);
418 return NULL;
419 }
420
421 INIT_LIST_HEAD(&map->list);
422 map->virt = virt;
423 map->phys = pg_off;
424 map->size = pg_sz;
425 map->refcount = 1;
426
427 list_add_tail_rcu(&map->list, &acpi_ioremaps);
428
429out:
430 mutex_unlock(&acpi_ioremap_lock);
431 return map->virt + (phys - map->phys);
432}
433EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
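
/*
 * An illustrative sketch of the map/read/unmap pattern served by the
 * function above; the physical address is hypothetical and
 * acpi_os_unmap_iomem() is defined further below:
 *
 *    struct acpi_table_header __iomem *hdr;
 *
 *    hdr = acpi_os_map_iomem(0x7fe86000, sizeof(*hdr));
 *    if (hdr) {
 *            pr_debug("table length %u\n", readl(&hdr->length));
 *            acpi_os_unmap_iomem((void __iomem *)hdr, sizeof(*hdr));
 *    }
 */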
434
435void *__init_refok
436acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
437{
438 return (void *)acpi_os_map_iomem(phys, size);
439}
440EXPORT_SYMBOL_GPL(acpi_os_map_memory);
441
442static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
443{
444 if (!--map->refcount)
445 list_del_rcu(&map->list);
446}
447
448static void acpi_os_map_cleanup(struct acpi_ioremap *map)
449{
450 if (!map->refcount) {
451 synchronize_rcu_expedited();
452 acpi_unmap(map->phys, map->virt);
453 kfree(map);
454 }
455}
456
457/**
458 * acpi_os_unmap_iomem - Drop a memory mapping reference.
459 * @virt: Start of the address range to drop a reference to.
460 * @size: Size of the address range to drop a reference to.
461 *
462 * Look up the given virtual address range in the list of existing ACPI memory
463 * mappings, drop a reference to it and unmap it if there are no more active
464 * references to it.
465 *
466 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
467 * routine simply calls __acpi_unmap_table() to get the job done. Since
468 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
469 * here.
470 */
471void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
472{
473 struct acpi_ioremap *map;
474
475 if (!acpi_gbl_permanent_mmap) {
476 __acpi_unmap_table(virt, size);
477 return;
478 }
479
480 mutex_lock(&acpi_ioremap_lock);
481 map = acpi_map_lookup_virt(virt, size);
482 if (!map) {
483 mutex_unlock(&acpi_ioremap_lock);
484 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
485 return;
486 }
487 acpi_os_drop_map_ref(map);
488 mutex_unlock(&acpi_ioremap_lock);
489
490 acpi_os_map_cleanup(map);
491}
492EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
493
494void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
495{
496 return acpi_os_unmap_iomem((void __iomem *)virt, size);
497}
498EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
499
500void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
501{
502 if (!acpi_gbl_permanent_mmap)
503 __acpi_unmap_table(virt, size);
504}
505
506int acpi_os_map_generic_address(struct acpi_generic_address *gas)
507{
508 u64 addr;
509 void __iomem *virt;
510
511 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
512 return 0;
513
514 /* Handle possible alignment issues */
515 memcpy(&addr, &gas->address, sizeof(addr));
516 if (!addr || !gas->bit_width)
517 return -EINVAL;
518
519 virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
520 if (!virt)
521 return -EIO;
522
523 return 0;
524}
525EXPORT_SYMBOL(acpi_os_map_generic_address);
526
527void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
528{
529 u64 addr;
530 struct acpi_ioremap *map;
531
532 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
533 return;
534
535 /* Handle possible alignment issues */
536 memcpy(&addr, &gas->address, sizeof(addr));
537 if (!addr || !gas->bit_width)
538 return;
539
540 mutex_lock(&acpi_ioremap_lock);
541 map = acpi_map_lookup(addr, gas->bit_width / 8);
542 if (!map) {
543 mutex_unlock(&acpi_ioremap_lock);
544 return;
545 }
546 acpi_os_drop_map_ref(map);
547 mutex_unlock(&acpi_ioremap_lock);
548
549 acpi_os_map_cleanup(map);
550}
551EXPORT_SYMBOL(acpi_os_unmap_generic_address);
552
553#ifdef ACPI_FUTURE_USAGE
554acpi_status
555acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
556{
557 if (!phys || !virt)
558 return AE_BAD_PARAMETER;
559
560 *phys = virt_to_phys(virt);
561
562 return AE_OK;
563}
564#endif
565
566#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
567static bool acpi_rev_override;
568
569int __init acpi_rev_override_setup(char *str)
570{
571 acpi_rev_override = true;
572 return 1;
573}
574__setup("acpi_rev_override", acpi_rev_override_setup);
575#else
576#define acpi_rev_override false
577#endif
578
579#define ACPI_MAX_OVERRIDE_LEN 100
580
581static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
582
583acpi_status
584acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
585 char **new_val)
586{
587 if (!init_val || !new_val)
588 return AE_BAD_PARAMETER;
589
590 *new_val = NULL;
591 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
592 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
593 acpi_os_name);
594 *new_val = acpi_os_name;
595 }
596
597 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
598 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
599 *new_val = (char *)5;
600 }
601
602 return AE_OK;
603}
604
605static void acpi_table_taint(struct acpi_table_header *table)
606{
607 pr_warn(PREFIX
608 "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
609 table->signature, table->oem_table_id);
610 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
611}
612
613#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
614#include <linux/earlycpio.h>
615#include <linux/memblock.h>
616
617static u64 acpi_tables_addr;
618static int all_tables_size;
619
620/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
621static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
622{
623 u8 sum = 0;
624 u8 *end = buffer + length;
625
626 while (buffer < end)
627 sum = (u8) (sum + *(buffer++));
628 return sum;
629}
630
631/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
632static const char * const table_sigs[] = {
633 ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
634 ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
635 ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
636 ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
637 ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
638 ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
639 ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
640 ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
641 ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
642
643#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
644
645#define ACPI_OVERRIDE_TABLES 64
646static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
647static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
648
649#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
650
651void __init acpi_initrd_override(void *data, size_t size)
652{
653 int sig, no, table_nr = 0, total_offset = 0;
654 long offset = 0;
655 struct acpi_table_header *table;
656 char cpio_path[32] = "kernel/firmware/acpi/";
657 struct cpio_data file;
658
659 if (data == NULL || size == 0)
660 return;
661
662 for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
663 file = find_cpio_data(cpio_path, data, size, &offset);
664 if (!file.data)
665 break;
666
667 data += offset;
668 size -= offset;
669
670 if (file.size < sizeof(struct acpi_table_header)) {
671 pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
672 cpio_path, file.name);
673 continue;
674 }
675
676 table = file.data;
677
678 for (sig = 0; table_sigs[sig]; sig++)
679 if (!memcmp(table->signature, table_sigs[sig], 4))
680 break;
681
682 if (!table_sigs[sig]) {
683 pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
684 cpio_path, file.name);
685 continue;
686 }
687 if (file.size != table->length) {
688 pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
689 cpio_path, file.name);
690 continue;
691 }
692 if (acpi_table_checksum(file.data, table->length)) {
693 pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
694 cpio_path, file.name);
695 continue;
696 }
697
698 pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
699 table->signature, cpio_path, file.name, table->length);
700
701 all_tables_size += table->length;
702 acpi_initrd_files[table_nr].data = file.data;
703 acpi_initrd_files[table_nr].size = file.size;
704 table_nr++;
705 }
706 if (table_nr == 0)
707 return;
708
709 acpi_tables_addr =
710 memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
711 all_tables_size, PAGE_SIZE);
712 if (!acpi_tables_addr) {
713 WARN_ON(1);
714 return;
715 }
716 /*
717 * Calling e820_add_region alone does not work: the tables become
718 * invalid later because the memory gets reused.
719 * memblock_reserve works as expected and the tables won't get modified,
720 * but on x86 it is not enough, because ioremap (used by
721 * acpi_os_map_memory) will later complain that the pages to be
722 * mapped are not marked "reserved".
723 * Using both memblock_reserve and e820_add_region (via
724 * arch_reserve_mem_area) works fine.
725 */
726 memblock_reserve(acpi_tables_addr, all_tables_size);
727 arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
728
729 /*
730 * early_ioremap can only remap 256 KB at a time. Mapping all
731 * tables at once would hit that limit, so map and copy them in
732 * chunks, the same way relocate_initrd() does.
733 */
734 for (no = 0; no < table_nr; no++) {
735 unsigned char *src_p = acpi_initrd_files[no].data;
736 phys_addr_t size = acpi_initrd_files[no].size;
737 phys_addr_t dest_addr = acpi_tables_addr + total_offset;
738 phys_addr_t slop, clen;
739 char *dest_p;
740
741 total_offset += size;
742
743 while (size) {
744 slop = dest_addr & ~PAGE_MASK;
745 clen = size;
746 if (clen > MAP_CHUNK_SIZE - slop)
747 clen = MAP_CHUNK_SIZE - slop;
748 dest_p = early_ioremap(dest_addr & PAGE_MASK,
749 clen + slop);
750 memcpy(dest_p + slop, src_p, clen);
751 early_iounmap(dest_p, clen + slop);
752 src_p += clen;
753 dest_addr += clen;
754 size -= clen;
755 }
756 }
757}
758
759acpi_status
760acpi_os_physical_table_override(struct acpi_table_header *existing_table,
761 acpi_physical_address *address, u32 *length)
762{
763 int table_offset = 0;
764 int table_index = 0;
765 struct acpi_table_header *table;
766 u32 table_length;
767
768 *length = 0;
769 *address = 0;
770 if (!acpi_tables_addr)
771 return AE_OK;
772
773 while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
774 table = acpi_os_map_memory(acpi_tables_addr + table_offset,
775 ACPI_HEADER_SIZE);
776 if (table_offset + table->length > all_tables_size) {
777 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
778 WARN_ON(1);
779 return AE_OK;
780 }
781
782 table_length = table->length;
783
784 /* Only override tables whose signature and OEM table ID match */
785 if (test_bit(table_index, acpi_initrd_installed) ||
786 memcmp(existing_table->signature, table->signature, 4) ||
787 memcmp(table->oem_table_id, existing_table->oem_table_id,
788 ACPI_OEM_TABLE_ID_SIZE)) {
789 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
790 goto next_table;
791 }
792
793 *length = table_length;
794 *address = acpi_tables_addr + table_offset;
795 acpi_table_taint(existing_table);
796 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
797 set_bit(table_index, acpi_initrd_installed);
798 break;
799
800next_table:
801 table_offset += table_length;
802 table_index++;
803 }
804 return AE_OK;
805}
806
807void __init acpi_initrd_initialize_tables(void)
808{
809 int table_offset = 0;
810 int table_index = 0;
811 u32 table_length;
812 struct acpi_table_header *table;
813
814 if (!acpi_tables_addr)
815 return;
816
817 while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
818 table = acpi_os_map_memory(acpi_tables_addr + table_offset,
819 ACPI_HEADER_SIZE);
820 if (table_offset + table->length > all_tables_size) {
821 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
822 WARN_ON(1);
823 return;
824 }
825
826 table_length = table->length;
827
828 /* Skip RSDT/XSDT which should only be used for override */
829 if (test_bit(table_index, acpi_initrd_installed) ||
830 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
831 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
832 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
833 goto next_table;
834 }
835
836 acpi_table_taint(table);
837 acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
838 acpi_install_table(acpi_tables_addr + table_offset, TRUE);
839 set_bit(table_index, acpi_initrd_installed);
840next_table:
841 table_offset += table_length;
842 table_index++;
843 }
844}
845#else
846acpi_status
847acpi_os_physical_table_override(struct acpi_table_header *existing_table,
848 acpi_physical_address *address,
849 u32 *table_length)
850{
851 *table_length = 0;
852 *address = 0;
853 return AE_OK;
854}
855
856void __init acpi_initrd_initialize_tables(void)
857{
858}
859#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
860
861acpi_status
862acpi_os_table_override(struct acpi_table_header *existing_table,
863 struct acpi_table_header **new_table)
864{
865 if (!existing_table || !new_table)
866 return AE_BAD_PARAMETER;
867
868 *new_table = NULL;
869
870#ifdef CONFIG_ACPI_CUSTOM_DSDT
871 if (strncmp(existing_table->signature, "DSDT", 4) == 0)
872 *new_table = (struct acpi_table_header *)AmlCode;
873#endif
874 if (*new_table != NULL)
875 acpi_table_taint(existing_table);
876 return AE_OK;
877}
878
879static irqreturn_t acpi_irq(int irq, void *dev_id)
880{
881 u32 handled;
882
883 handled = (*acpi_irq_handler) (acpi_irq_context);
884
885 if (handled) {
886 acpi_irq_handled++;
887 return IRQ_HANDLED;
888 } else {
889 acpi_irq_not_handled++;
890 return IRQ_NONE;
891 }
892}
893
894acpi_status
895acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
896 void *context)
897{
898 unsigned int irq;
899
900 acpi_irq_stats_init();
901
902 /*
903 * ACPI interrupts different from the SCI in our copy of the FADT are
904 * not supported.
905 */
906 if (gsi != acpi_gbl_FADT.sci_interrupt)
907 return AE_BAD_PARAMETER;
908
909 if (acpi_irq_handler)
910 return AE_ALREADY_ACQUIRED;
911
912 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
913 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
914 gsi);
915 return AE_OK;
916 }
917
918 acpi_irq_handler = handler;
919 acpi_irq_context = context;
920 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
921 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
922 acpi_irq_handler = NULL;
923 return AE_NOT_ACQUIRED;
924 }
925 acpi_sci_irq = irq;
926
927 return AE_OK;
928}
929
930acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
931{
932 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
933 return AE_BAD_PARAMETER;
934
935 free_irq(acpi_sci_irq, acpi_irq);
936 acpi_irq_handler = NULL;
937 acpi_sci_irq = INVALID_ACPI_IRQ;
938
939 return AE_OK;
940}
941
942/*
943 * Running in interpreter thread context, safe to sleep
944 */
945
946void acpi_os_sleep(u64 ms)
947{
948 msleep(ms);
949}
950
951void acpi_os_stall(u32 us)
952{
953 while (us) {
954 u32 delay = 1000;
955
956 if (delay > us)
957 delay = us;
958 udelay(delay);
959 touch_nmi_watchdog();
960 us -= delay;
961 }
962}
963
964/*
965 * Support ACPI 3.0 AML Timer operand
966 * Returns 64-bit free-running, monotonically increasing timer
967 * with 100ns granularity
968 */
969u64 acpi_os_get_timer(void)
970{
971 u64 time_ns = ktime_to_ns(ktime_get());
972 do_div(time_ns, 100);
973 return time_ns;
974}
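
/*
 * An illustrative sketch of consuming the 100 ns ticks returned above
 * (10000 ticks per millisecond), mirroring the do_div() use in the
 * function itself:
 *
 *    u64 start = acpi_os_get_timer(), elapsed;
 *
 *    acpi_os_sleep(10);
 *    elapsed = acpi_os_get_timer() - start;
 *    do_div(elapsed, 10000);
 *    pr_debug("slept ~%llu ms\n", (unsigned long long)elapsed);
 */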
975
976acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
977{
978 u32 dummy;
979
980 if (!value)
981 value = &dummy;
982
983 *value = 0;
984 if (width <= 8) {
985 *(u8 *) value = inb(port);
986 } else if (width <= 16) {
987 *(u16 *) value = inw(port);
988 } else if (width <= 32) {
989 *(u32 *) value = inl(port);
990 } else {
991 BUG();
992 }
993
994 return AE_OK;
995}
996
997EXPORT_SYMBOL(acpi_os_read_port);
998
999acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
1000{
1001 if (width <= 8) {
1002 outb(value, port);
1003 } else if (width <= 16) {
1004 outw(value, port);
1005 } else if (width <= 32) {
1006 outl(value, port);
1007 } else {
1008 BUG();
1009 }
1010
1011 return AE_OK;
1012}
1013
1014EXPORT_SYMBOL(acpi_os_write_port);
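
/*
 * An illustrative sketch of a byte-wide read-modify-write using the two
 * helpers above; the port number and mask are hypothetical:
 *
 *    u32 val;
 *
 *    if (ACPI_SUCCESS(acpi_os_read_port(0x80, &val, 8)))
 *            acpi_os_write_port(0x80, val | 0x01, 8);
 */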
1015
1016acpi_status
1017acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
1018{
1019 void __iomem *virt_addr;
1020 unsigned int size = width / 8;
1021 bool unmap = false;
1022 u64 dummy;
1023
1024 rcu_read_lock();
1025 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
1026 if (!virt_addr) {
1027 rcu_read_unlock();
1028 virt_addr = acpi_os_ioremap(phys_addr, size);
1029 if (!virt_addr)
1030 return AE_BAD_ADDRESS;
1031 unmap = true;
1032 }
1033
1034 if (!value)
1035 value = &dummy;
1036
1037 switch (width) {
1038 case 8:
1039 *(u8 *) value = readb(virt_addr);
1040 break;
1041 case 16:
1042 *(u16 *) value = readw(virt_addr);
1043 break;
1044 case 32:
1045 *(u32 *) value = readl(virt_addr);
1046 break;
1047 case 64:
1048 *(u64 *) value = readq(virt_addr);
1049 break;
1050 default:
1051 BUG();
1052 }
1053
1054 if (unmap)
1055 iounmap(virt_addr);
1056 else
1057 rcu_read_unlock();
1058
1059 return AE_OK;
1060}
1061
1062acpi_status
1063acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
1064{
1065 void __iomem *virt_addr;
1066 unsigned int size = width / 8;
1067 bool unmap = false;
1068
1069 rcu_read_lock();
1070 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
1071 if (!virt_addr) {
1072 rcu_read_unlock();
1073 virt_addr = acpi_os_ioremap(phys_addr, size);
1074 if (!virt_addr)
1075 return AE_BAD_ADDRESS;
1076 unmap = true;
1077 }
1078
1079 switch (width) {
1080 case 8:
1081 writeb(value, virt_addr);
1082 break;
1083 case 16:
1084 writew(value, virt_addr);
1085 break;
1086 case 32:
1087 writel(value, virt_addr);
1088 break;
1089 case 64:
1090 writeq(value, virt_addr);
1091 break;
1092 default:
1093 BUG();
1094 }
1095
1096 if (unmap)
1097 iounmap(virt_addr);
1098 else
1099 rcu_read_unlock();
1100
1101 return AE_OK;
1102}
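
/*
 * An illustrative sketch of the two helpers above; the physical address
 * is hypothetical. Regions already entered into acpi_ioremaps (e.g. via
 * acpi_os_map_iomem()) are found under the RCU read lock, so the hot
 * path avoids a fresh ioremap():
 *
 *    u64 val;
 *
 *    if (ACPI_SUCCESS(acpi_os_read_memory(0x7fe86000, &val, 32)))
 *            acpi_os_write_memory(0x7fe86000, val, 32);
 */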
1103
1104acpi_status
1105acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1106 u64 *value, u32 width)
1107{
1108 int result, size;
1109 u32 value32;
1110
1111 if (!value)
1112 return AE_BAD_PARAMETER;
1113
1114 switch (width) {
1115 case 8:
1116 size = 1;
1117 break;
1118 case 16:
1119 size = 2;
1120 break;
1121 case 32:
1122 size = 4;
1123 break;
1124 default:
1125 return AE_ERROR;
1126 }
1127
1128 result = raw_pci_read(pci_id->segment, pci_id->bus,
1129 PCI_DEVFN(pci_id->device, pci_id->function),
1130 reg, size, &value32);
1131 *value = value32;
1132
1133 return (result ? AE_ERROR : AE_OK);
1134}
1135
1136acpi_status
1137acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
1138 u64 value, u32 width)
1139{
1140 int result, size;
1141
1142 switch (width) {
1143 case 8:
1144 size = 1;
1145 break;
1146 case 16:
1147 size = 2;
1148 break;
1149 case 32:
1150 size = 4;
1151 break;
1152 default:
1153 return AE_ERROR;
1154 }
1155
1156 result = raw_pci_write(pci_id->segment, pci_id->bus,
1157 PCI_DEVFN(pci_id->device, pci_id->function),
1158 reg, size, value);
1159
1160 return (result ? AE_ERROR : AE_OK);
1161}
1162
1163static void acpi_os_execute_deferred(struct work_struct *work)
1164{
1165 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
1166
1167 dpc->function(dpc->context);
1168 kfree(dpc);
1169}
1170
1171#ifdef CONFIG_ACPI_DEBUGGER
1172static struct acpi_debugger acpi_debugger;
1173static bool acpi_debugger_initialized;
1174
1175int acpi_register_debugger(struct module *owner,
1176 const struct acpi_debugger_ops *ops)
1177{
1178 int ret = 0;
1179
1180 mutex_lock(&acpi_debugger.lock);
1181 if (acpi_debugger.ops) {
1182 ret = -EBUSY;
1183 goto err_lock;
1184 }
1185
1186 acpi_debugger.owner = owner;
1187 acpi_debugger.ops = ops;
1188
1189err_lock:
1190 mutex_unlock(&acpi_debugger.lock);
1191 return ret;
1192}
1193EXPORT_SYMBOL(acpi_register_debugger);
1194
1195void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
1196{
1197 mutex_lock(&acpi_debugger.lock);
1198 if (ops == acpi_debugger.ops) {
1199 acpi_debugger.ops = NULL;
1200 acpi_debugger.owner = NULL;
1201 }
1202 mutex_unlock(&acpi_debugger.lock);
1203}
1204EXPORT_SYMBOL(acpi_unregister_debugger);
1205
1206int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
1207{
1208 int ret;
1209 int (*func)(acpi_osd_exec_callback, void *);
1210 struct module *owner;
1211
1212 if (!acpi_debugger_initialized)
1213 return -ENODEV;
1214 mutex_lock(&acpi_debugger.lock);
1215 if (!acpi_debugger.ops) {
1216 ret = -ENODEV;
1217 goto err_lock;
1218 }
1219 if (!try_module_get(acpi_debugger.owner)) {
1220 ret = -ENODEV;
1221 goto err_lock;
1222 }
1223 func = acpi_debugger.ops->create_thread;
1224 owner = acpi_debugger.owner;
1225 mutex_unlock(&acpi_debugger.lock);
1226
1227 ret = func(function, context);
1228
1229 mutex_lock(&acpi_debugger.lock);
1230 module_put(owner);
1231err_lock:
1232 mutex_unlock(&acpi_debugger.lock);
1233 return ret;
1234}
1235
1236ssize_t acpi_debugger_write_log(const char *msg)
1237{
1238 ssize_t ret;
1239 ssize_t (*func)(const char *);
1240 struct module *owner;
1241
1242 if (!acpi_debugger_initialized)
1243 return -ENODEV;
1244 mutex_lock(&acpi_debugger.lock);
1245 if (!acpi_debugger.ops) {
1246 ret = -ENODEV;
1247 goto err_lock;
1248 }
1249 if (!try_module_get(acpi_debugger.owner)) {
1250 ret = -ENODEV;
1251 goto err_lock;
1252 }
1253 func = acpi_debugger.ops->write_log;
1254 owner = acpi_debugger.owner;
1255 mutex_unlock(&acpi_debugger.lock);
1256
1257 ret = func(msg);
1258
1259 mutex_lock(&acpi_debugger.lock);
1260 module_put(owner);
1261err_lock:
1262 mutex_unlock(&acpi_debugger.lock);
1263 return ret;
1264}
1265
1266ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
1267{
1268 ssize_t ret;
1269 ssize_t (*func)(char *, size_t);
1270 struct module *owner;
1271
1272 if (!acpi_debugger_initialized)
1273 return -ENODEV;
1274 mutex_lock(&acpi_debugger.lock);
1275 if (!acpi_debugger.ops) {
1276 ret = -ENODEV;
1277 goto err_lock;
1278 }
1279 if (!try_module_get(acpi_debugger.owner)) {
1280 ret = -ENODEV;
1281 goto err_lock;
1282 }
1283 func = acpi_debugger.ops->read_cmd;
1284 owner = acpi_debugger.owner;
1285 mutex_unlock(&acpi_debugger.lock);
1286
1287 ret = func(buffer, buffer_length);
1288
1289 mutex_lock(&acpi_debugger.lock);
1290 module_put(owner);
1291err_lock:
1292 mutex_unlock(&acpi_debugger.lock);
1293 return ret;
1294}
1295
1296int acpi_debugger_wait_command_ready(void)
1297{
1298 int ret;
1299 int (*func)(bool, char *, size_t);
1300 struct module *owner;
1301
1302 if (!acpi_debugger_initialized)
1303 return -ENODEV;
1304 mutex_lock(&acpi_debugger.lock);
1305 if (!acpi_debugger.ops) {
1306 ret = -ENODEV;
1307 goto err_lock;
1308 }
1309 if (!try_module_get(acpi_debugger.owner)) {
1310 ret = -ENODEV;
1311 goto err_lock;
1312 }
1313 func = acpi_debugger.ops->wait_command_ready;
1314 owner = acpi_debugger.owner;
1315 mutex_unlock(&acpi_debugger.lock);
1316
1317 ret = func(acpi_gbl_method_executing,
1318 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
1319
1320 mutex_lock(&acpi_debugger.lock);
1321 module_put(owner);
1322err_lock:
1323 mutex_unlock(&acpi_debugger.lock);
1324 return ret;
1325}
1326
1327int acpi_debugger_notify_command_complete(void)
1328{
1329 int ret;
1330 int (*func)(void);
1331 struct module *owner;
1332
1333 if (!acpi_debugger_initialized)
1334 return -ENODEV;
1335 mutex_lock(&acpi_debugger.lock);
1336 if (!acpi_debugger.ops) {
1337 ret = -ENODEV;
1338 goto err_lock;
1339 }
1340 if (!try_module_get(acpi_debugger.owner)) {
1341 ret = -ENODEV;
1342 goto err_lock;
1343 }
1344 func = acpi_debugger.ops->notify_command_complete;
1345 owner = acpi_debugger.owner;
1346 mutex_unlock(&acpi_debugger.lock);
1347
1348 ret = func();
1349
1350 mutex_lock(&acpi_debugger.lock);
1351 module_put(owner);
1352err_lock:
1353 mutex_unlock(&acpi_debugger.lock);
1354 return ret;
1355}
1356
1357int __init acpi_debugger_init(void)
1358{
1359 mutex_init(&acpi_debugger.lock);
1360 acpi_debugger_initialized = true;
1361 return 0;
1362}
1363#endif
1364
1365/*******************************************************************************
1366 *
1367 * FUNCTION: acpi_os_execute
1368 *
1369 * PARAMETERS: Type - Type of the callback
1370 * Function - Function to be executed
1371 * Context - Function parameters
1372 *
1373 * RETURN: Status
1374 *
1375 * DESCRIPTION: Depending on type, either queues function for deferred execution or
1376 * immediately executes function on a separate thread.
1377 *
1378 ******************************************************************************/
1379
1380acpi_status acpi_os_execute(acpi_execute_type type,
1381 acpi_osd_exec_callback function, void *context)
1382{
1383 acpi_status status = AE_OK;
1384 struct acpi_os_dpc *dpc;
1385 struct workqueue_struct *queue;
1386 int ret;
1387 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1388 "Scheduling function [%p(%p)] for deferred execution.\n",
1389 function, context));
1390
1391 if (type == OSL_DEBUGGER_MAIN_THREAD) {
1392 ret = acpi_debugger_create_thread(function, context);
1393 if (ret) {
1394 pr_err("Call to kthread_create() failed.\n");
1395 status = AE_ERROR;
1396 }
1397 goto out_thread;
1398 }
1399
1400 /*
1401 * Allocate/initialize DPC structure. Note that this memory will be
1402 * freed by the callee. The kernel handles the work_struct list in a
1403 * way that allows us to also free its memory inside the callee.
1404 * Because we may want to schedule several tasks with different
1405 * parameters we can't use the approach some kernel code uses of
1406 * having a static work_struct.
1407 */
1408
1409 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1410 if (!dpc)
1411 return AE_NO_MEMORY;
1412
1413 dpc->function = function;
1414 dpc->context = context;
1415
1416 /*
1417 * To prevent lockdep from complaining unnecessarily, make sure that
1418 * there is a different static lockdep key for each workqueue by using
1419 * INIT_WORK() for each of them separately.
1420 */
1421 if (type == OSL_NOTIFY_HANDLER) {
1422 queue = kacpi_notify_wq;
1423 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1424 } else if (type == OSL_GPE_HANDLER) {
1425 queue = kacpid_wq;
1426 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1427 } else {
1428 pr_err("Unsupported os_execute type %d.\n", type);
1429 status = AE_ERROR;
1430 }
1431
1432 if (ACPI_FAILURE(status))
1433 goto err_workqueue;
1434
1435 /*
1436 * On some machines, a software-initiated SMI causes corruption unless
1437 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
1438 * typically it's done in GPE-related methods that are run via
1439 * workqueues, so we can avoid the known corruption cases by always
1440 * queueing on CPU 0.
1441 */
1442 ret = queue_work_on(0, queue, &dpc->work);
1443 if (!ret) {
1444 printk(KERN_ERR PREFIX
1445 "Call to queue_work() failed.\n");
1446 status = AE_ERROR;
1447 }
1448err_workqueue:
1449 if (ACPI_FAILURE(status))
1450 kfree(dpc);
1451out_thread:
1452 return status;
1453}
1454EXPORT_SYMBOL(acpi_os_execute);
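
/*
 * An illustrative sketch of deferring a callback through the function
 * above; the callback and context names are hypothetical:
 *
 *    static void my_notify_fn(void *context)
 *    {
 *            kfree(context);         // runs in process context
 *    }
 *
 *    acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, ctx);
 */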
1455
1456void acpi_os_wait_events_complete(void)
1457{
1458 /*
1459 * Make sure the GPE handler or the fixed event handler is not used
1460 * on another CPU after removal.
1461 */
1462 if (acpi_sci_irq_valid())
1463 synchronize_hardirq(acpi_sci_irq);
1464 flush_workqueue(kacpid_wq);
1465 flush_workqueue(kacpi_notify_wq);
1466}
1467
1468struct acpi_hp_work {
1469 struct work_struct work;
1470 struct acpi_device *adev;
1471 u32 src;
1472};
1473
1474static void acpi_hotplug_work_fn(struct work_struct *work)
1475{
1476 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1477
1478 acpi_os_wait_events_complete();
1479 acpi_device_hotplug(hpw->adev, hpw->src);
1480 kfree(hpw);
1481}
1482
1483acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1484{
1485 struct acpi_hp_work *hpw;
1486
1487 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1488 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1489 adev, src));
1490
1491 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1492 if (!hpw)
1493 return AE_NO_MEMORY;
1494
1495 INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1496 hpw->adev = adev;
1497 hpw->src = src;
1498 /*
1499 * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
1500 * the hotplug code may call driver .remove() functions, which may
1501 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1502 * these workqueues.
1503 */
1504 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
1505 kfree(hpw);
1506 return AE_ERROR;
1507 }
1508 return AE_OK;
1509}
1510
1511bool acpi_queue_hotplug_work(struct work_struct *work)
1512{
1513 return queue_work(kacpi_hotplug_wq, work);
1514}
1515
1516acpi_status
1517acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1518{
1519 struct semaphore *sem = NULL;
1520
1521 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1522 if (!sem)
1523 return AE_NO_MEMORY;
1524
1525 sema_init(sem, initial_units);
1526
1527 *handle = (acpi_handle *) sem;
1528
1529 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1530 *handle, initial_units));
1531
1532 return AE_OK;
1533}
1534
1535/*
1536 * TODO: A better way to delete semaphores? Linux doesn't have a
1537 * 'delete_semaphore()' function -- may result in an invalid
1538 * pointer dereference for non-synchronized consumers. Should
1539 * we at least check for blocked threads and signal/cancel them?
1540 */
1541
1542acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1543{
1544 struct semaphore *sem = (struct semaphore *)handle;
1545
1546 if (!sem)
1547 return AE_BAD_PARAMETER;
1548
1549 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1550
1551 BUG_ON(!list_empty(&sem->wait_list));
1552 kfree(sem);
1553 sem = NULL;
1554
1555 return AE_OK;
1556}
1557
1558/*
1559 * TODO: Support for units > 1?
1560 */
1561acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1562{
1563 acpi_status status = AE_OK;
1564 struct semaphore *sem = (struct semaphore *)handle;
1565 long jiffies;
1566 int ret = 0;
1567
1568 if (!acpi_os_initialized)
1569 return AE_OK;
1570
1571 if (!sem || (units < 1))
1572 return AE_BAD_PARAMETER;
1573
1574 if (units > 1)
1575 return AE_SUPPORT;
1576
1577 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1578 handle, units, timeout));
1579
1580 if (timeout == ACPI_WAIT_FOREVER)
1581 jiffies = MAX_SCHEDULE_TIMEOUT;
1582 else
1583 jiffies = msecs_to_jiffies(timeout);
1584
1585 ret = down_timeout(sem, jiffies);
1586 if (ret)
1587 status = AE_TIME;
1588
1589 if (ACPI_FAILURE(status)) {
1590 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1591 "Failed to acquire semaphore[%p|%d|%d], %s",
1592 handle, units, timeout,
1593 acpi_format_exception(status)));
1594 } else {
1595 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1596 "Acquired semaphore[%p|%d|%d]", handle,
1597 units, timeout));
1598 }
1599
1600 return status;
1601}
1602
1603/*
1604 * TODO: Support for units > 1?
1605 */
1606acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1607{
1608 struct semaphore *sem = (struct semaphore *)handle;
1609
1610 if (!acpi_os_initialized)
1611 return AE_OK;
1612
1613 if (!sem || (units < 1))
1614 return AE_BAD_PARAMETER;
1615
1616 if (units > 1)
1617 return AE_SUPPORT;
1618
1619 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1620 units));
1621
1622 up(sem);
1623
1624 return AE_OK;
1625}
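
/*
 * An illustrative sketch of the semaphore lifecycle implemented above
 * (ACPICA is the normal caller of these):
 *
 *    acpi_handle sem;
 *
 *    if (ACPI_SUCCESS(acpi_os_create_semaphore(1, 1, &sem))) {
 *            if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))
 *                    acpi_os_signal_semaphore(sem, 1);
 *            acpi_os_delete_semaphore(sem);
 *    }
 */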
1626
1627acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1628{
1629#ifdef ENABLE_DEBUGGER
1630 if (acpi_in_debugger) {
1631 u32 chars;
1632
1633 kdb_read(buffer, buffer_length);
1634
1635 /* remove the trailing CR that kdb includes */
1636 chars = strlen(buffer) - 1;
1637 buffer[chars] = '\0';
1638 }
1639#else
1640 int ret;
1641
1642 ret = acpi_debugger_read_cmd(buffer, buffer_length);
1643 if (ret < 0)
1644 return AE_ERROR;
1645 if (bytes_read)
1646 *bytes_read = ret;
1647#endif
1648
1649 return AE_OK;
1650}
1651EXPORT_SYMBOL(acpi_os_get_line);
1652
1653acpi_status acpi_os_wait_command_ready(void)
1654{
1655 int ret;
1656
1657 ret = acpi_debugger_wait_command_ready();
1658 if (ret < 0)
1659 return AE_ERROR;
1660 return AE_OK;
1661}
1662
1663acpi_status acpi_os_notify_command_complete(void)
1664{
1665 int ret;
1666
1667 ret = acpi_debugger_notify_command_complete();
1668 if (ret < 0)
1669 return AE_ERROR;
1670 return AE_OK;
1671}
1672
1673acpi_status acpi_os_signal(u32 function, void *info)
1674{
1675 switch (function) {
1676 case ACPI_SIGNAL_FATAL:
1677 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1678 break;
1679 case ACPI_SIGNAL_BREAKPOINT:
1680 /*
1681 * AML Breakpoint
1682 * The ACPI spec says to treat it as a NOP unless
1683 * you are debugging. So if/when we integrate the
1684 * AML debugger into the kernel debugger, its
1685 * hook will go here. But until then it is
1686 * not useful to print anything on breakpoints.
1687 */
1688 break;
1689 default:
1690 break;
1691 }
1692
1693 return AE_OK;
1694}
1695
1696static int __init acpi_os_name_setup(char *str)
1697{
1698 char *p = acpi_os_name;
1699 int count = ACPI_MAX_OVERRIDE_LEN - 1;
1700
1701 if (!str || !*str)
1702 return 0;
1703
1704 for (; count-- && *str; str++) {
1705 if (isalnum(*str) || *str == ' ' || *str == ':')
1706 *p++ = *str;
1707 else if (*str == '\'' || *str == '"')
1708 continue;
1709 else
1710 break;
1711 }
1712 *p = 0;
1713
1714 return 1;
1715
1716}
1717
1718__setup("acpi_os_name=", acpi_os_name_setup);
1719
1720#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
1721#define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */
1722
1723struct osi_setup_entry {
1724 char string[OSI_STRING_LENGTH_MAX];
1725 bool enable;
1726};
1727
1728static struct osi_setup_entry
1729 osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
1730 {"Module Device", true},
1731 {"Processor Device", true},
1732 {"3.0 _SCP Extensions", true},
1733 {"Processor Aggregator Device", true},
1734};
1735
1736void __init acpi_osi_setup(char *str)
1737{
1738 struct osi_setup_entry *osi;
1739 bool enable = true;
1740 int i;
1741
1742 if (!acpi_gbl_create_osi_method)
1743 return;
1744
1745 if (str == NULL || *str == '\0') {
1746 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1747 acpi_gbl_create_osi_method = FALSE;
1748 return;
1749 }
1750
1751 if (*str == '!') {
1752 str++;
1753 if (*str == '\0') {
1754 osi_linux.default_disabling = 1;
1755 return;
1756 } else if (*str == '*') {
1757 acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
1758 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1759 osi = &osi_setup_entries[i];
1760 osi->enable = false;
1761 }
1762 return;
1763 }
1764 enable = false;
1765 }
1766
1767 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1768 osi = &osi_setup_entries[i];
1769 if (!strcmp(osi->string, str)) {
1770 osi->enable = enable;
1771 break;
1772 } else if (osi->string[0] == '\0') {
1773 osi->enable = enable;
1774 strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1775 break;
1776 }
1777 }
1778}
1779
1780static void __init set_osi_linux(unsigned int enable)
1781{
1782 if (osi_linux.enable != enable)
1783 osi_linux.enable = enable;
1784
1785 if (osi_linux.enable)
1786 acpi_osi_setup("Linux");
1787 else
1788 acpi_osi_setup("!Linux");
1789
1790 return;
1791}
1792
1793static void __init acpi_cmdline_osi_linux(unsigned int enable)
1794{
1795 osi_linux.cmdline = 1; /* cmdline sets the default and overrides DMI */
1796 osi_linux.dmi = 0;
1797 set_osi_linux(enable);
1798
1799 return;
1800}
1801
1802void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1803{
1804 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1805
1806 if (enable == -1)
1807 return;
1808
1809 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
1810 set_osi_linux(enable);
1811
1812 return;
1813}
1814
1815/*
1816 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1817 *
1818 * empty string disables _OSI
1819 * string starting with '!' disables that string
1820 * otherwise string is added to list, augmenting built-in strings
1821 */
1822static void __init acpi_osi_setup_late(void)
1823{
1824 struct osi_setup_entry *osi;
1825 char *str;
1826 int i;
1827 acpi_status status;
1828
1829 if (osi_linux.default_disabling) {
1830 status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
1831
1832 if (ACPI_SUCCESS(status))
1833 printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
1834 }
1835
1836 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1837 osi = &osi_setup_entries[i];
1838 str = osi->string;
1839
1840 if (*str == '\0')
1841 break;
1842 if (osi->enable) {
1843 status = acpi_install_interface(str);
1844
1845 if (ACPI_SUCCESS(status))
1846 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1847 } else {
1848 status = acpi_remove_interface(str);
1849
1850 if (ACPI_SUCCESS(status))
1851 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1852 }
1853 }
1854}
1855
1856static int __init osi_setup(char *str)
1857{
1858 if (str && !strcmp("Linux", str))
1859 acpi_cmdline_osi_linux(1);
1860 else if (str && !strcmp("!Linux", str))
1861 acpi_cmdline_osi_linux(0);
1862 else
1863 acpi_osi_setup(str);
1864
1865 return 1;
1866}
1867
1868__setup("acpi_osi=", osi_setup);
1869
1870/*
1871 * Disable the auto-serialization of methods that create named objects.
1872 *
1873 * This feature is enabled by default. It marks the AML control methods
1874 * that contain the opcodes to create named objects as "Serialized".
1875 */
1876static int __init acpi_no_auto_serialize_setup(char *str)
1877{
1878 acpi_gbl_auto_serialize_methods = FALSE;
1879 pr_info("ACPI: auto-serialization disabled\n");
1880
1881 return 1;
1882}
1883
1884__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1885
1886 /* Check for resource interference between native drivers and ACPI
1887 * OperationRegions (SystemIO and SystemMemory only).
1888 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1889 * in arbitrary AML code and can interfere with legacy drivers.
1890 * acpi_enforce_resources= can be set to:
1891 *
1892 * - strict (default) (2)
1893 * -> a driver trying to access the resources will not load
1894 * - lax (1)
1895 * -> a driver trying to access the resources will load, but you
1896 * get a system message that something might go wrong...
1897 *
1898 * - no (0)
1899 * -> ACPI Operation Region resources will not be registered
1900 *
1901 */
1902#define ENFORCE_RESOURCES_STRICT 2
1903#define ENFORCE_RESOURCES_LAX 1
1904#define ENFORCE_RESOURCES_NO 0
1905
1906static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1907
1908static int __init acpi_enforce_resources_setup(char *str)
1909{
1910 if (str == NULL || *str == '\0')
1911 return 0;
1912
1913 if (!strcmp("strict", str))
1914 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1915 else if (!strcmp("lax", str))
1916 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1917 else if (!strcmp("no", str))
1918 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1919
1920 return 1;
1921}
1922
1923__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1924
1925/* Check for resource conflicts between ACPI OperationRegions and native
1926 * drivers */
1927int acpi_check_resource_conflict(const struct resource *res)
1928{
1929 acpi_adr_space_type space_id;
1930 acpi_size length;
1931 u8 warn = 0;
1932 int clash = 0;
1933
1934 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1935 return 0;
1936 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1937 return 0;
1938
1939 if (res->flags & IORESOURCE_IO)
1940 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1941 else
1942 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1943
1944 length = resource_size(res);
1945 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1946 warn = 1;
1947 clash = acpi_check_address_range(space_id, res->start, length, warn);
1948
1949 if (clash) {
1950 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1951 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1952 printk(KERN_NOTICE "ACPI: This conflict may"
1953 " cause random problems and system"
1954 " instability\n");
1955 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1956 " for this device, you should use it instead of"
1957 " the native driver\n");
1958 }
1959 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1960 return -EBUSY;
1961 }
1962 return 0;
1963}
1964EXPORT_SYMBOL(acpi_check_resource_conflict);
1965
1966int acpi_check_region(resource_size_t start, resource_size_t n,
1967 const char *name)
1968{
1969 struct resource res = {
1970 .start = start,
1971 .end = start + n - 1,
1972 .name = name,
1973 .flags = IORESOURCE_IO,
1974 };
1975
1976 return acpi_check_resource_conflict(&res);
1977}
1978EXPORT_SYMBOL(acpi_check_region);
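
/*
 * An illustrative sketch of how a native driver uses the check above
 * before claiming legacy I/O ports; the port, length and name are
 * hypothetical. A non-zero return only happens in strict mode:
 *
 *    if (acpi_check_region(0x295, 2, "mydrv"))
 *            return -EBUSY;          // AML owns this range
 *    if (!request_region(0x295, 2, "mydrv"))
 *            return -EBUSY;
 */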
1979
1980/*
1981 * Let drivers know whether the resource checks are effective
1982 */
1983int acpi_resources_are_enforced(void)
1984{
1985 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1986}
1987EXPORT_SYMBOL(acpi_resources_are_enforced);
1988
1989bool acpi_osi_is_win8(void)
1990{
1991 return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
1992}
1993EXPORT_SYMBOL(acpi_osi_is_win8);
1994
1995/*
1996 * Deallocate the memory for a spinlock.
1997 */
1998void acpi_os_delete_lock(acpi_spinlock handle)
1999{
2000 ACPI_FREE(handle);
2001}
2002
2003/*
2004 * Acquire a spinlock.
2005 *
2006 * handle is a pointer to the spinlock_t.
2007 */
2008
2009acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
2010{
2011 acpi_cpu_flags flags;
2012 spin_lock_irqsave(lockp, flags);
2013 return flags;
2014}
2015
2016/*
2017 * Release a spinlock. See above.
2018 */
2019
2020void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
2021{
2022 spin_unlock_irqrestore(lockp, flags);
2023}
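
/*
 * An illustrative sketch of the acquire/release pairing above; "lock" is
 * a hypothetical acpi_spinlock and the returned flags must be handed
 * back unchanged:
 *
 *    acpi_cpu_flags flags = acpi_os_acquire_lock(lock);
 *
 *    // ... touch data shared with interrupt context ...
 *    acpi_os_release_lock(lock, flags);
 */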
2024
2025#ifndef ACPI_USE_LOCAL_CACHE
2026
2027/*******************************************************************************
2028 *
2029 * FUNCTION: acpi_os_create_cache
2030 *
2031 * PARAMETERS: name - Ascii name for the cache
2032 * size - Size of each cached object
2033 * depth - Maximum depth of the cache (in objects) <ignored>
2034 * cache - Where the new cache object is returned
2035 *
2036 * RETURN: status
2037 *
2038 * DESCRIPTION: Create a cache object
2039 *
2040 ******************************************************************************/
2041
2042acpi_status
2043acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
2044{
2045 *cache = kmem_cache_create(name, size, 0, 0, NULL);
2046 if (*cache == NULL)
2047 return AE_ERROR;
2048 else
2049 return AE_OK;
2050}
2051
2052/*******************************************************************************
2053 *
2054 * FUNCTION: acpi_os_purge_cache
2055 *
2056 * PARAMETERS: Cache - Handle to cache object
2057 *
2058 * RETURN: Status
2059 *
2060 * DESCRIPTION: Free all objects within the requested cache.
2061 *
2062 ******************************************************************************/
2063
2064acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
2065{
2066 kmem_cache_shrink(cache);
2067 return (AE_OK);
2068}
2069
2070/*******************************************************************************
2071 *
2072 * FUNCTION: acpi_os_delete_cache
2073 *
2074 * PARAMETERS: Cache - Handle to cache object
2075 *
2076 * RETURN: Status
2077 *
2078 * DESCRIPTION: Free all objects within the requested cache and delete the
2079 * cache object.
2080 *
2081 ******************************************************************************/
2082
2083acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
2084{
2085 kmem_cache_destroy(cache);
2086 return (AE_OK);
2087}
2088
2089/*******************************************************************************
2090 *
2091 * FUNCTION: acpi_os_release_object
2092 *
2093 * PARAMETERS: Cache - Handle to cache object
2094 * Object - The object to be released
2095 *
2096 * RETURN: None
2097 *
2098 * DESCRIPTION: Release an object to the specified cache. If cache is full,
2099 * the object is deleted.
2100 *
2101 ******************************************************************************/
2102
2103acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
2104{
2105 kmem_cache_free(cache, object);
2106 return (AE_OK);
2107}
2108#endif
2109
2110static int __init acpi_no_static_ssdt_setup(char *s)
2111{
2112 acpi_gbl_disable_ssdt_table_install = TRUE;
2113 pr_info("ACPI: static SSDT installation disabled\n");
2114
2115 return 0;
2116}
2117
2118early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
2119
static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

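/*
 * Example: boot with "acpica_no_return_repair" to stop ACPICA from
 * repairing the return values of predefined methods when they deviate
 * from the ACPI specification.
 */
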
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
					   u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

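/*
 * Registration sketch (my_prepare_sleep() is hypothetical). The callback's
 * return value maps onto the status codes above: negative aborts the
 * transition (AE_ERROR), positive skips writing the PM1 control registers
 * (AE_CTRL_SKIP), zero continues normally:
 *
 *	static int my_prepare_sleep(u8 state, u32 pm1a_ctrl, u32 pm1b_ctrl)
 *	{
 *		return 0;
 *	}
 *
 *	acpi_os_set_prepare_sleep(my_prepare_sleep);
 */
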
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						    u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}
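
/*
 * The extended variant above serves the hardware-reduced (ACPI 5.0) sleep
 * path, where val_a/val_b are destined for the sleep control registers
 * rather than PM1 control. A hook registered via
 * acpi_os_set_prepare_extended_sleep() follows the same return convention
 * as the legacy prepare-sleep hook sketched earlier.
 */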