* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
/*
 * Sanity-check one MADT subtable entry: reject a NULL pointer, an entry
 * whose fixed-size body would run past the end of the table, or an entry
 * whose self-reported length is not exactly the expected structure size.
 * NOTE(review): the strict `!=` comparison rejects firmware that pads
 * entries beyond sizeof(*entry) — confirm that is intended here.
 */
#define BAD_MADT_ENTRY(entry, end) ( \
		(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
		((acpi_table_entry_header *)entry)->length != sizeof(*entry))

#define PREFIX "ACPI: "
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
+static unsigned int __initdata acpi_madt_rev;
+
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
-static unsigned int __initdata acpi_madt_rev;
-
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
unsigned int can_cpei_retarget(void)
{
extern int cpe_vector;
- extern unsigned int force_cpei_retarget;
/*
* Only if CPEI is supported and the override flag
* is present, otherwise return that its re-targettable
* if we are in polling mode.
*/
- if (cpe_vector > 0) {
- if (acpi_cpei_override || force_cpei_retarget)
- return 1;
- else
- return 0;
- }
- return 1;
+ if (cpe_vector > 0 && !acpi_cpei_override)
+ return 0;
+ else
+ return 1;
}
unsigned int is_cpu_cpei_target(unsigned int cpu)
{
acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
}
-#endif
unsigned int get_cpei_target_cpu(void)
{
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
+/* maps to convert between proximity domain and logical node ID */
+int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;
-static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
-{
- int pxm;
-
- pxm = pa->proximity_domain;
- if (ia64_platform_is("sn2"))
- pxm += pa->reserved[0] << 8;
- return pxm;
-}
-
-static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
-{
- int pxm;
-
- pxm = ma->proximity_domain;
- if (ia64_platform_is("sn2"))
- pxm += ma->reserved1[0] << 8;
- return pxm;
-}
-
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
/*
 * Handle one SRAT processor-affinity entry: mark its proximity domain
 * in the PXM bitmap and record the CPU's physical id. The nid field is
 * filled with the raw PXM value here and overridden with the logical
 * node id later during NUMA fixup.
 * NOTE(review): entries are recorded regardless of the enabled flag —
 * confirm disabled-processor entries are intentionally counted.
 */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
	/* record this node in proximity bitmap */
	pxm_bit_set(pa->proximity_domain);

	/* physical id = (local SAPIC id << 8) | local SAPIC EID */
	node_cpuid[srat_num_cpus].phys_id =
	    (pa->apic_id << 8) | (pa->lsapic_eid);
	/* nid should be overridden as logical node id later */
	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
	srat_num_cpus++;
}
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
unsigned long paddr, size;
- int pxm;
+ u8 pxm;
struct node_memblk_s *p, *q, *pend;
- pxm = get_memory_proximity_domain(ma);
+ pxm = ma->proximity_domain;
/* fill node memory chunk structure */
paddr = ma->base_addr_hi;
* MCD - This can probably be dropped now. No need for pxm ID to node ID
* mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
*/
+ /* calculate total number of nodes in system from PXM bitmap */
+ memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
+ memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
- int nid = acpi_map_pxm_to_node(i);
+ int nid = num_online_nodes();
+ pxm_to_nid_map[i] = nid;
+ nid_to_pxm_map[nid] = i;
node_set_online(nid);
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
+ node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
/* assign memory bank numbers for each chunk on each node */
for_each_online_node(i) {
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
- node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
+ node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
printk(KERN_INFO "Number of logical nodes in system = %d\n",
num_online_nodes());
for (i = 0; i < slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
- node_from = pxm_to_node(i);
+ node_from = pxm_to_nid_map[i];
for (j = 0; j < slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
- node_to = pxm_to_node(j);
+ node_to = pxm_to_nid_map[j];
node_distance(node_from, node_to) =
slit_table->entry[i * slit_table->localities + j];
}
static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
- struct fadt_descriptor *fadt;
+ struct fadt_descriptor_rev2 *fadt;
if (!phys_addr || !size)
return -EINVAL;
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (struct fadt_descriptor *)fadt_header;
+ fadt = (struct fadt_descriptor_rev2 *)fadt_header;
if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
acpi_kbd_controller_present = 0;
{
unsigned long rsdp_phys = 0;
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- rsdp_phys = efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ if (efi.acpi20)
+ rsdp_phys = __pa(efi.acpi20);
+ else if (efi.acpi)
printk(KERN_WARNING PREFIX
"v1.0/r0.71 tables no longer supported\n");
return rsdp_phys;
{
#ifdef CONFIG_ACPI_NUMA
int pxm_id;
- int nid;
pxm_id = acpi_get_pxm(handle);
+
/*
- * We don't have cpu-only-node hotadd. But if the system equips
- * SRAT table, pxm is already found and node is ready.
- * So, just pxm_to_nid(pxm) is OK.
- * This code here is for the system which doesn't have full SRAT
- * table for possible cpus.
+ * Assuming that the container driver would have set the proximity
+ * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
*/
- nid = acpi_map_pxm_to_node(pxm_id);
+ node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
+
node_cpuid[cpu].phys_id = physid;
- node_cpuid[cpu].nid = nid;
#endif
return (0);
}
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(*lsapic)) {
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
return -EINVAL;
}
if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
(!lsapic->flags.enabled)) {
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
return -EINVAL;
}
physid = ((lsapic->id << 8) | (lsapic->eid));
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(*iosapic)) {
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
return AE_OK;
}
iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
return AE_OK;
}
gsi_base = iosapic->global_irq_base;
- kfree(buffer.pointer);
+ acpi_os_free(buffer.pointer);
/*
* OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
if (pxm < 0)
return AE_OK;
- node = pxm_to_node(pxm);
+ node = pxm_to_nid_map[pxm];
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))