*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
#include <asm/uaccess.h>
#include <linux/efi.h>
/*
 * Deferred procedure call packet: a callback plus its opaque context,
 * queued for execution on the kacpid workqueue (see acpi_os_queue_for_execution).
 */
struct acpi_os_dpc
{
- OSD_EXECUTION_CALLBACK function;
+ acpi_osd_exec_callback function;
	void *context;
};
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+#include CONFIG_ACPI_CUSTOM_DSDT_FILE
+#endif
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
+
/* stuff for debugger support */
int acpi_in_debugger;
+EXPORT_SYMBOL(acpi_in_debugger);
+
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER*/
static unsigned int acpi_irq_irq;
-static OSD_HANDLER acpi_irq_handler;
+static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
+static struct workqueue_struct *kacpid_wq;
/*
 * Early OS-layer init stub; the real work (PCI config access setup,
 * kacpid workqueue creation) is deferred to acpi_os_initialize1(),
 * which runs later in boot when those facilities are available.
 */
acpi_status
acpi_os_initialize(void)
+{
+	return AE_OK;
+}
+
+acpi_status
+acpi_os_initialize1(void)
{
/*
* Initialize PCI configuration space access, as we'll need to access
return AE_NULL_ENTRY;
}
#endif
+ kacpid_wq = create_singlethread_workqueue("kacpid");
+ BUG_ON(!kacpid_wq);
return AE_OK;
}
acpi_irq_handler);
}
+ destroy_workqueue(kacpid_wq);
+
return AE_OK;
}
acpi_os_vprintf(fmt, args);
va_end(args);
}
+EXPORT_SYMBOL(acpi_os_printf);
void
acpi_os_vprintf(const char *fmt, va_list args)
{
kfree(ptr);
}
+EXPORT_SYMBOL(acpi_os_free);
acpi_status
acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
}
acpi_status
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void **virt)
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void __iomem **virt)
{
if (efi_enabled) {
if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
- *virt = phys_to_virt(phys);
+ *virt = (void __iomem *) phys_to_virt(phys);
} else {
*virt = ioremap(phys, size);
}
}
void
-acpi_os_unmap_memory(void *virt, acpi_size size)
+acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
iounmap(virt);
}
+#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
return AE_OK;
}
+#endif
#define ACPI_MAX_OVERRIDE_LEN 100
*new_val = NULL;
if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
- printk(KERN_INFO PREFIX "Overriding _OS definition %s\n",
+ printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
acpi_os_name);
*new_val = acpi_os_name;
}
if (!existing_table || !new_table)
return AE_BAD_PARAMETER;
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header*)AmlCode;
+ else
+ *new_table = NULL;
+#else
*new_table = NULL;
+#endif
return AE_OK;
}
}
acpi_status
-acpi_os_install_interrupt_handler(u32 gsi, OSD_HANDLER handler, void *context)
+acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, void *context)
{
unsigned int irq;
}
acpi_status
-acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler)
+acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
if (irq) {
free_irq(irq, acpi_irq);
*/
void
-acpi_os_sleep(u32 sec, u32 ms)
+acpi_os_sleep(acpi_integer ms)
{
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HZ * sec + (ms * HZ) / 1000);
+ schedule_timeout(((signed long) ms * HZ) / 1000);
}
+EXPORT_SYMBOL(acpi_os_sleep);
void
acpi_os_stall(u32 us)
us -= delay;
}
}
+EXPORT_SYMBOL(acpi_os_stall);
+
+/*
+ * Support ACPI 3.0 AML Timer operand
+ * Returns 64-bit free-running, monotonically increasing timer
+ * with 100ns granularity
+ *
+ * NOTE(review): this is a stub — it returns a simple static counter,
+ * NOT a real 100ns clock; HPET/PM-timer backends are still TBD below.
+ */
+u64
+acpi_os_get_timer (void)
+{
+	/* Static so the value persists and increases across calls. */
+	static u64 t;
+
+#ifdef CONFIG_HPET
+	/* TBD: use HPET if available */
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+	/* TBD: default to PM timer if HPET was not available */
+#endif
+	/* Warn exactly once (first call, while t == 0) that this is a stub. */
+	if (!t)
+		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
+
+	return ++t;
+}
acpi_status
acpi_os_read_port(
return AE_OK;
}
+EXPORT_SYMBOL(acpi_os_read_port);
acpi_status
acpi_os_write_port(
return AE_OK;
}
+EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(
u32 width)
{
u32 dummy;
- void *virt_addr;
+ void __iomem *virt_addr;
int iomem = 0;
if (efi_enabled) {
if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
- virt_addr = phys_to_virt(phys_addr);
+ /* HACK ALERT! We can use readb/w/l on real memory too.. */
+ virt_addr = (void __iomem *) phys_to_virt(phys_addr);
} else {
iomem = 1;
virt_addr = ioremap(phys_addr, width);
}
} else
- virt_addr = phys_to_virt(phys_addr);
+ virt_addr = (void __iomem *) phys_to_virt(phys_addr);
if (!value)
value = &dummy;
switch (width) {
case 8:
- *(u8*) value = *(u8*) virt_addr;
+ *(u8*) value = readb(virt_addr);
break;
case 16:
- *(u16*) value = *(u16*) virt_addr;
+ *(u16*) value = readw(virt_addr);
break;
case 32:
- *(u32*) value = *(u32*) virt_addr;
+ *(u32*) value = readl(virt_addr);
break;
default:
BUG();
u32 value,
u32 width)
{
- void *virt_addr;
+ void __iomem *virt_addr;
int iomem = 0;
if (efi_enabled) {
if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
- virt_addr = phys_to_virt(phys_addr);
+ /* HACK ALERT! We can use writeb/w/l on real memory too */
+ virt_addr = (void __iomem *) phys_to_virt(phys_addr);
} else {
iomem = 1;
virt_addr = ioremap(phys_addr, width);
}
} else
- virt_addr = phys_to_virt(phys_addr);
+ virt_addr = (void __iomem *) phys_to_virt(phys_addr);
switch (width) {
case 8:
- *(u8*) virt_addr = value;
+ writeb(value, virt_addr);
break;
case 16:
- *(u16*) virt_addr = value;
+ writew(value, virt_addr);
break;
case 32:
- *(u32*) virt_addr = value;
+ writel(value, virt_addr);
break;
default:
BUG();
return AE_ERROR;
}
+ BUG_ON(!raw_pci_ops);
+
result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
reg, size, value);
return (result ? AE_ERROR : AE_OK);
}
+EXPORT_SYMBOL(acpi_os_read_pci_configuration);
acpi_status
acpi_os_write_pci_configuration (struct acpi_pci_id *pci_id, u32 reg, acpi_integer value, u32 width)
return AE_ERROR;
}
+ BUG_ON(!raw_pci_ops);
+
result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
reg, size, value);
acpi_integer value,
u32 width)
{
- return (AE_SUPPORT);
+ return AE_SUPPORT;
}
acpi_status
void *value,
u32 width)
{
- return (AE_SUPPORT);
+ return AE_SUPPORT;
}
void
acpi_status
acpi_os_queue_for_execution(
u32 priority,
- OSD_EXECUTION_CALLBACK function,
+ acpi_osd_exec_callback function,
void *context)
{
acpi_status status = AE_OK;
task = (void *)(dpc+1);
INIT_WORK(task, acpi_os_execute_deferred, (void*)dpc);
- if (!schedule_work(task)) {
- ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_work() failed.\n"));
+ if (!queue_work(kacpid_wq, task)) {
+ ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to queue_work() failed.\n"));
kfree(dpc);
status = AE_ERROR;
}
return_ACPI_STATUS (status);
}
+EXPORT_SYMBOL(acpi_os_queue_for_execution);
+
+/*
+ * Block until all work previously queued on the kacpid workqueue has
+ * finished executing.  The context argument is unused.
+ */
+void
+acpi_os_wait_events_complete(
+	void *context)
+{
+	flush_workqueue(kacpid_wq);
+}
+EXPORT_SYMBOL(acpi_os_wait_events_complete);
/*
* Allocate the memory for a spinlock and initialize it.
return_ACPI_STATUS (AE_OK);
}
+EXPORT_SYMBOL(acpi_os_create_semaphore);
/*
return_ACPI_STATUS (AE_OK);
}
+EXPORT_SYMBOL(acpi_os_delete_semaphore);
/*
// TODO: A better timeout algorithm?
{
int i = 0;
- static const int quantum_ms = 1000/HZ;
+ static const int quantum_ms = (HZ>1000)?1:(1000/HZ);
ret = down_trylock(sem);
for (i = timeout; (i > 0 && ret < 0); i -= quantum_ms) {
return_ACPI_STATUS (status);
}
+EXPORT_SYMBOL(acpi_os_wait_semaphore);
/*
return_ACPI_STATUS (AE_OK);
}
+EXPORT_SYMBOL(acpi_os_signal_semaphore);
+#ifdef ACPI_FUTURE_USAGE
u32
acpi_os_get_line(char *buffer)
{
return 0;
}
+#endif /* ACPI_FUTURE_USAGE */
/* Assumes no unreadable holes in between */
u8
{
#if defined(__i386__) || defined(__x86_64__)
char tmp;
- return !__get_user(tmp, (char *)ptr) && !__get_user(tmp, (char *)ptr + len - 1);
+ return !__get_user(tmp, (char __user *)ptr) && !__get_user(tmp, (char __user *)ptr + len - 1);
#endif
return 1;
}
+#ifdef ACPI_FUTURE_USAGE
u8
acpi_os_writable(void *ptr, acpi_size len)
{
The later may be difficult at early boot when kmap doesn't work yet. */
return 1;
}
+#endif
u32
acpi_os_get_thread_id (void)
printk(KERN_ERR PREFIX "Fatal opcode executed\n");
break;
case ACPI_SIGNAL_BREAKPOINT:
- {
- char *bp_info = (char*) info;
-
- printk(KERN_ERR "ACPI breakpoint: %s\n", bp_info);
- }
+ /*
+ * AML Breakpoint
+ * ACPI spec. says to treat it as a NOP unless
+ * you are debugging. So if/when we integrate
+ * AML debugger into the kernel debugger its
+ * hook will go here. But until then it is
+ * not useful to print anything on breakpoints.
+ */
+ break;
default:
break;
}
return AE_OK;
}
+EXPORT_SYMBOL(acpi_os_signal);
int __init
acpi_os_name_setup(char *str)
* Run-time events on the same GPE this flag is available
* to tell Linux to keep the wake-time GPEs enabled at run-time.
*/
-static int __init
-acpi_leave_gpes_disabled_setup(char *str)
+/*
+ * "acpi_wake_gpes_always_on" boot parameter: keep wake-time GPEs
+ * enabled at run-time (clears acpi_gbl_leave_wake_gpes_disabled).
+ * Replaces, and inverts the sense of, the old
+ * "acpi_leave_gpes_disabled" parameter.
+ */
+int __init
+acpi_wake_gpes_always_on_setup(char *str)
{
-	printk(KERN_INFO PREFIX "leave wake GPEs disabled\n");
+	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
-	acpi_gbl_leave_wake_gpes_disabled = TRUE;
+	acpi_gbl_leave_wake_gpes_disabled = FALSE;
	return 1;
}
-__setup("acpi_leave_gpes_disabled", acpi_leave_gpes_disabled_setup);
+__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
+
+/*
+ * max_cstate is defined in the base kernel so modules can
+ * change it w/o depending on the state of the processor module.
+ */
+unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
+
+EXPORT_SYMBOL(max_cstate);