patch-2.4.23 linux-2.4.23/arch/ia64/kernel/acpi.c
- Lines: 535
- Date: 2003-11-28 10:26:19.000000000 -0800
- Orig file: linux-2.4.22/arch/ia64/kernel/acpi.c
- Orig date: 2003-08-25 04:44:39.000000000 -0700
diff -urN linux-2.4.22/arch/ia64/kernel/acpi.c linux-2.4.23/arch/ia64/kernel/acpi.c
@@ -8,6 +8,9 @@
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -38,11 +41,14 @@
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/efi.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
+#include <asm/numa.h>
#define PREFIX "ACPI: "
@@ -59,6 +65,8 @@
unsigned char acpi_kbd_controller_present = 1;
+int acpi_disabled __initdata; /* XXX this shouldn't be needed---we can't boot without ACPI! */
+
const char *
acpi_get_sysname (void)
{
@@ -90,6 +98,9 @@
if (!strcmp(hdr->oem_id, "HP")) {
return "hpzx1";
}
+ else if (!strcmp(hdr->oem_id, "SGI")) {
+ return "sn2";
+ }
return "dig";
#else
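
A minimal stand-alone illustration (not part of the patch) of the OEM-ID check added above: the DSDT header's oem_id string selects the machine-vector name, with "dig" as the fallback. Only the "HP" and "SGI" cases come from the patch itself; the example caller and its inputs are made up.

    #include <stdio.h>
    #include <string.h>

    /* Same selection logic as the hunk above, outside the kernel. */
    static const char *sysname_from_oem(const char *oem_id)
    {
        if (!strcmp(oem_id, "HP"))
            return "hpzx1";
        else if (!strcmp(oem_id, "SGI"))
            return "sn2";
        return "dig";       /* default machine vector */
    }

    int main(void)
    {
        printf("%s %s %s\n", sysname_from_oem("HP"),
               sysname_from_oem("SGI"), sysname_from_oem("IBM"));  /* hpzx1 sn2 dig */
        return 0;
    }
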
@@ -97,8 +108,6 @@
return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
-# elif defined (CONFIG_IA64_SGI_SN1)
- return "sn1";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
@@ -123,17 +132,17 @@
};
acpi_status
-acpi_vendor_resource_match(acpi_resource *resource, void *context)
+acpi_vendor_resource_match (struct acpi_resource *resource, void *context)
{
struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
- acpi_resource_vendor *vendor;
+ struct acpi_resource_vendor *vendor;
struct acpi_vendor_descriptor *descriptor;
u32 length;
if (resource->id != ACPI_RSTYPE_VENDOR)
return AE_OK;
- vendor = (acpi_resource_vendor *) &resource->data;
+ vendor = (struct acpi_resource_vendor *) &resource->data;
descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
if (vendor->length <= sizeof(*info->descriptor) ||
descriptor->guid_id != info->descriptor->guid_id ||
@@ -151,7 +160,7 @@
}
acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
+acpi_find_vendor_resource (acpi_handle obj, struct acpi_vendor_descriptor *id,
u8 **data, u32 *length)
{
struct acpi_vendor_info info;
@@ -174,12 +183,11 @@
};
acpi_status
-acpi_hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+acpi_hp_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
acpi_status status;
u8 *data;
u32 length;
- int i;
status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
@@ -200,7 +208,9 @@
#define ACPI_MAX_PLATFORM_INTERRUPTS 256
/* Array to record platform interrupt vectors for generic interrupt routing. */
-int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = { [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1 };
+int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
+ [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
+};
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
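
The reformatted initializer above relies on GNU C range designators. A tiny compilable sketch (gcc extension; the array size here is hypothetical) of the same [first ... last] = value idiom used to preset every platform interrupt vector to -1:

    #include <stdio.h>

    #define N 8

    /* Every element from 0 to N-1 is set to -1, exactly as
     * platform_intr_list[] is initialized in the hunk above. */
    static int vec[N] = { [0 ... N - 1] = -1 };

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++)
            printf("%d ", vec[i]);   /* prints: -1 -1 -1 -1 -1 -1 -1 -1 */
        printf("\n");
        return 0;
    }
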
@@ -214,7 +224,7 @@
int vector = -1;
if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
- /* correctable platform error interrupt */
+ /* corrected platform error interrupt */
vector = platform_intr_list[int_type];
} else
printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
@@ -252,7 +262,6 @@
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
}
-
return 0;
}
@@ -261,7 +270,6 @@
acpi_parse_lsapic (acpi_table_entry_header *header)
{
struct acpi_table_lsapic *lsapic;
- int phys_id;
lsapic = (struct acpi_table_lsapic *) header;
if (!lsapic)
@@ -269,30 +277,21 @@
acpi_table_print_madt_entry(header);
- phys_id = (lsapic->id << 8) | lsapic->eid;
-
- if (total_cpus == NR_CPUS) {
- printk(KERN_ERR PREFIX "Ignoring CPU (0x%04x) (NR_CPUS == %d)\n",
- phys_id, NR_CPUS);
- return 0;
- }
-
- printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, phys_id);
+ printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
- if (lsapic->flags.enabled) {
- available_cpus++;
+ if (!lsapic->flags.enabled)
+ printk(" disabled");
+ else if (available_cpus >= NR_CPUS)
+ printk(" ignored (increase NR_CPUS)");
+ else {
printk(" enabled");
#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[total_cpus] = phys_id;
- if (hard_smp_processor_id() == smp_boot_data.cpu_phys_id[total_cpus])
+ smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+ if (hard_smp_processor_id()
+ == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
printk(" (BSP)");
#endif
- }
- else {
- printk(" disabled");
-#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[total_cpus] = -1;
-#endif
+ ++available_cpus;
}
printk("\n");
@@ -314,7 +313,6 @@
acpi_table_print_madt_entry(header);
/* TBD: Support lapic_nmi entries */
-
return 0;
}
@@ -405,7 +403,6 @@
acpi_table_print_madt_entry(header);
/* TBD: Support nimsrc entries */
-
return 0;
}
@@ -437,22 +434,209 @@
}
+#ifdef CONFIG_ACPI_NUMA
+
+#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
+
+static int __initdata srat_num_cpus; /* number of cpus */
+static u32 __initdata pxm_flag[PXM_FLAG_LEN];
+#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
+#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
+/* maps to convert between proximity domain and logical node ID */
+int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+int __initdata nid_to_pxm_map[NR_NODES];
+struct acpi_table_slit __initdata *slit_table;
+
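
pxm_flag[] above packs one bit per proximity domain into 32-bit words and is driven through the kernel's set_bit()/test_bit(). A minimal open-coded equivalent of the pxm_bit_set()/pxm_bit_test() macros (non-atomic, and with an assumed domain count rather than MAX_PXM_DOMAINS):

    #include <stdio.h>

    #define DEMO_DOMAINS 256

    /* One bit per domain, packed into 32-bit words. */
    static unsigned int flag[(DEMO_DOMAINS + 31) / 32];

    static void demo_bit_set(int bit)  { flag[bit / 32] |= 1u << (bit % 32); }
    static int  demo_bit_test(int bit) { return (flag[bit / 32] >> (bit % 32)) & 1; }

    int main(void)
    {
        demo_bit_set(37);
        printf("bit 37: %d, bit 38: %d\n",
               demo_bit_test(37), demo_bit_test(38));   /* 1, 0 */
        return 0;
    }
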
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ */
+void __init
+acpi_numa_slit_init (struct acpi_table_slit *slit)
+{
+ u32 len;
+
+ len = sizeof(struct acpi_table_header) + 8
+ + slit->localities * slit->localities;
+ if (slit->header.length != len) {
+ printk("KERN_INFO ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
+ len, slit->header.length);
+ memset(numa_slit, 10, sizeof(numa_slit));
+ return;
+ }
+ slit_table = slit;
+}
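
The length check above reflects the ACPI 2.0 SLIT layout: the table header, an 8-byte locality count, then a localities x localities byte matrix that is later read as entry[i*localities + j]. A simplified, compilable sketch of that indexing; the struct here is an assumption, not the kernel's struct acpi_table_slit:

    #include <stdio.h>

    /* Simplified SLIT stand-in: locality count plus an n x n byte matrix
     * of relative distances, where 10 means "local". */
    struct slit_demo {
        unsigned long localities;
        unsigned char entry[4 * 4];
    };

    /* Same indexing the patch uses when filling node_distance(). */
    static int slit_distance(const struct slit_demo *s, int i, int j)
    {
        return s->entry[i * s->localities + j];
    }

    int main(void)
    {
        struct slit_demo s = { 4, {
            10, 20, 20, 30,
            20, 10, 30, 20,
            20, 30, 10, 20,
            30, 20, 20, 10 } };

        printf("distance(0, 3) = %d\n", slit_distance(&s, 0, 3));   /* 30 */
        return 0;
    }
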
+
+void __init
+acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
+{
+ /* record this node in proximity bitmap */
+ pxm_bit_set(pa->proximity_domain);
+
+ node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
+ /* nid should be overridden as logical node id later */
+ node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
+ srat_num_cpus++;
+}
+
+void __init
+acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
+{
+ unsigned long paddr, size, hole_size, min_hole_size;
+ u8 pxm;
+ struct node_memblk_s *p, *q, *pend;
+
+ pxm = ma->proximity_domain;
+
+ /* fill node memory chunk structure */
+ paddr = ma->base_addr_hi;
+ paddr = (paddr << 32) | ma->base_addr_lo;
+ size = ma->length_hi;
+ size = (size << 32) | ma->length_lo;
+
+ if (num_memblks >= NR_MEMBLKS) {
+ printk(KERN_ERR "Too many mem chunks in SRAT. Ignoring %ld MBytes at %lx\n",
+ size/(1024*1024), paddr);
+ return;
+ }
+
+ /* Ignore disabled entries */
+ if (!ma->flags.enabled)
+ return;
+
+ /*
+ * When the chunk is not the first one in the node, check distance
+ * from the other chunks. When the hole is too huge ignore the chunk.
+ * This restriction should be removed when multiple chunks per node
+ * is supported.
+ */
+ pend = &node_memblk[num_memblks];
+ min_hole_size = 0;
+ for (p = &node_memblk[0]; p < pend; p++) {
+ if (p->nid != pxm)
+ continue;
+ if (p->start_paddr < paddr)
+ hole_size = paddr - (p->start_paddr + p->size);
+ else
+ hole_size = p->start_paddr - (paddr + size);
+
+ if (!min_hole_size || hole_size < min_hole_size)
+ min_hole_size = hole_size;
+ }
+
+#if 0 /* test */
+ if (min_hole_size) {
+ if (min_hole_size > size) {
+ printk(KERN_ERR "Too huge memory hole. Ignoring %ld MBytes at %lx\n",
+ size/(1024*1024), paddr);
+ return;
+ }
+ }
+#endif
+
+ /* record this node in proximity bitmap */
+ pxm_bit_set(pxm);
+
+ /* Insertion sort based on base address */
+ pend = &node_memblk[num_memblks];
+ for (p = &node_memblk[0]; p < pend; p++) {
+ if (paddr < p->start_paddr)
+ break;
+ }
+ if (p < pend) {
+ for (q = pend; q >= p; q--)
+ *(q + 1) = *q;
+ }
+ p->start_paddr = paddr;
+ p->size = size;
+ p->nid = pxm;
+ num_memblks++;
+}
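
Two details of the function above, shown as a stand-alone sketch with hypothetical types (not the kernel's node_memblk_s): the 64-bit base address is assembled from the SRAT entry's hi/lo halves, and each chunk is inserted so the array stays sorted by base address.

    #include <stdio.h>

    struct chunk {
        unsigned long long start;
        unsigned long long size;
    };

    static struct chunk chunks[8];
    static int nchunks;

    static void add_chunk(unsigned int hi, unsigned int lo, unsigned long long size)
    {
        unsigned long long paddr = ((unsigned long long) hi << 32) | lo;
        int i, j;

        /* find the insertion point, as the patch's first loop does */
        for (i = 0; i < nchunks; i++)
            if (paddr < chunks[i].start)
                break;
        /* shift the tail up by one to make room */
        for (j = nchunks; j > i; j--)
            chunks[j] = chunks[j - 1];
        chunks[i].start = paddr;
        chunks[i].size = size;
        nchunks++;
    }

    int main(void)
    {
        add_chunk(0x1, 0x00000000, 0x1000);   /* base 0x100000000 */
        add_chunk(0x0, 0x80000000, 0x1000);   /* base 0x080000000 */
        printf("first chunk starts at 0x%llx\n", chunks[0].start);   /* 0x80000000 */
        return 0;
    }
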
+
+void __init
+acpi_numa_arch_fixup(void)
+{
+ int i, j, node_from, node_to;
+
+ if (srat_num_cpus == 0) {
+ node_cpuid[0].phys_id = hard_smp_processor_id();
+ return;
+ }
+
+ /* calculate total number of nodes in system from PXM bitmap */
+ numnodes = 0; /* init total nodes in system */
+
+ memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
+ memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
+ for (i = 0; i < MAX_PXM_DOMAINS; i++) {
+ if (pxm_bit_test(i)) {
+ pxm_to_nid_map[i] = numnodes;
+ nid_to_pxm_map[numnodes++] = i;
+ }
+ }
+
+ /* set logical node id in memory chunk structure */
+ for (i = 0; i < num_memblks; i++)
+ node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+
+ /* assign memory bank numbers for each chunk on each node */
+ for (i = 0; i < numnodes; i++) {
+ int bank;
+
+ bank = 0;
+ for (j = 0; j < num_memblks; j++)
+ if (node_memblk[j].nid == i)
+ node_memblk[j].bank = bank++;
+ }
+
+ /* set logical node id in cpu structure */
+ for (i = 0; i < srat_num_cpus; i++)
+ node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+
+ printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
+ printk(KERN_INFO "Number of memory chunks in system = %d\n", num_memblks);
+
+ if (!slit_table) return;
+ memset(numa_slit, -1, sizeof(numa_slit));
+ for (i=0; i<slit_table->localities; i++) {
+ if (!pxm_bit_test(i))
+ continue;
+ node_from = pxm_to_nid_map[i];
+ for (j=0; j<slit_table->localities; j++) {
+ if (!pxm_bit_test(j))
+ continue;
+ node_to = pxm_to_nid_map[j];
+ node_distance(node_from, node_to) =
+ slit_table->entry[i*slit_table->localities + j];
+ }
+ }
+
+#ifdef SLIT_DEBUG
+ printk(KERN_DEBUG "ACPI 2.0 SLIT locality table:\n");
+ for (i = 0; i < numnodes; i++) {
+ for (j = 0; j < numnodes; j++)
+ printk(KERN_DEBUG "%03d ", node_distance(i,j));
+ printk("\n");
+ }
+#endif
+}
+#endif /* CONFIG_ACPI_NUMA */
+
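
A compilable sketch of the compaction step in acpi_numa_arch_fixup() above: the sparse proximity-domain numbers recorded during SRAT parsing are renumbered into dense logical node ids 0..numnodes-1, with both forward and reverse maps kept. The array sizes and the plain int "seen" array are stand-ins for the kernel's bitmap and MAX_PXM_DOMAINS.

    #include <stdio.h>
    #include <string.h>

    #define MAX_PXM 16

    int main(void)
    {
        int pxm_seen[MAX_PXM] = { 0 };
        int pxm_to_nid[MAX_PXM];
        int nid_to_pxm[MAX_PXM];
        int i, numnodes = 0;

        pxm_seen[3] = pxm_seen[7] = pxm_seen[12] = 1;   /* domains found in SRAT */

        memset(pxm_to_nid, -1, sizeof(pxm_to_nid));
        memset(nid_to_pxm, -1, sizeof(nid_to_pxm));
        for (i = 0; i < MAX_PXM; i++)
            if (pxm_seen[i]) {
                pxm_to_nid[i] = numnodes;
                nid_to_pxm[numnodes++] = i;
            }

        printf("numnodes = %d, pxm 12 -> node %d\n",
               numnodes, pxm_to_nid[12]);   /* 3, 2 */
        return 0;
    }
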
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
- fadt_descriptor_rev2 *fadt;
+ struct fadt_descriptor_rev2 *fadt;
u32 sci_irq;
if (!phys_addr || !size)
return -EINVAL;
fadt_header = (struct acpi_table_header *) __va(phys_addr);
-
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (fadt_descriptor_rev2 *) fadt_header;
+ fadt = (struct fadt_descriptor_rev2 *) fadt_header;
if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
acpi_kbd_controller_present = 0;
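
The FADT's iapc_boot_arch field is a bitmask of legacy-hardware hints; the hunk above clears acpi_kbd_controller_present when the 8042 bit is absent. A trivial sketch of that test; the 0x0002 value follows the ACPI 2.0 boot-architecture bit assignment but should be treated as an assumption here (the kernel uses the BAF_8042_KEYBOARD_CONTROLLER macro):

    #include <stdio.h>

    #define DEMO_8042_PRESENT 0x0002   /* assumed bit value, see lead-in */

    int main(void)
    {
        unsigned short iapc_boot_arch = 0x0003;   /* example firmware value */
        int kbd_present = (iapc_boot_arch & DEMO_8042_PRESENT) != 0;

        printf("8042 keyboard controller present: %d\n", kbd_present);   /* 1 */
        return 0;
    }
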
@@ -479,20 +663,13 @@
rsdp_phys = __pa(efi.acpi20);
else if (efi.acpi)
printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
-
return rsdp_phys;
}
int __init
-acpi_boot_init (char *cmdline)
+acpi_boot_init (void)
{
- int result;
-
- /* Initialize the ACPI boot-time table parser */
- result = acpi_table_init(cmdline);
- if (result)
- return result;
/*
* MADT
@@ -509,65 +686,76 @@
/* Local APIC */
- if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
- acpi_parse_lapic_addr_ovr) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr) < 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_LSAPIC,
- acpi_parse_lsapic) < 1)
+ if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic) < 1)
printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
- if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI,
- acpi_parse_lapic_nmi) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi) < 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* I/O APIC */
- if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC,
- acpi_parse_iosapic) < 1)
- printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries\n");
+ if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic) < 1)
+ printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
/* System-Level Interrupt Routing */
- if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
- acpi_parse_plat_int_src) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src) < 0)
printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR,
- acpi_parse_int_src_ovr) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr) < 0)
printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
- if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC,
- acpi_parse_nmi_src) < 0)
+ if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src) < 0)
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-skip_madt:
+ skip_madt:
/*
- * The FADT table contains an SCI_INT line, by which the system
+ * FADT says whether a legacy keyboard controller is present.
+ * The FADT also contains an SCI_INT line, by which the system
* gets interrupts such as power and sleep buttons. If it's not
* on a Legacy interrupt, it needs to be setup.
*/
- if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
+ if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
printk(KERN_ERR PREFIX "Can't find FADT\n");
#ifdef CONFIG_SMP
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+ printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+ smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
available_cpus = 1; /* We've got at least one of these, no? */
}
- smp_boot_data.cpu_count = total_cpus;
+ smp_boot_data.cpu_count = available_cpus;
+
+ smp_build_cpu_map();
+# ifdef CONFIG_NUMA
+ /* If the platform did not have an SRAT table, initialize the
+ * node_cpuid table from the smp_boot_data array. All cpus
+ * will be on node 0.
+ */
+ if (srat_num_cpus == 0) {
+ int cpu, i=1;
+ for (cpu=0; cpu<smp_boot_data.cpu_count; cpu++)
+ if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
+ node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+ }
+ build_cpu_to_node_map();
+# endif
+
#endif
/* Make boot-up look pretty */
printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
return 0;
}
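
A stand-alone sketch of the no-SRAT fallback added to acpi_boot_init() above: slot 0 of node_cpuid already holds the boot CPU, so the remaining slots are filled from smp_boot_data while skipping the BSP's physical id (all CPUs then sit on node 0). The names, sizes, and example ids below are stand-ins, not the kernel's.

    #include <stdio.h>

    #define DEMO_NR_CPUS 4

    int main(void)
    {
        int cpu_phys_id[DEMO_NR_CPUS] = { 0x0100, 0x0000, 0x0200, 0x0300 };
        int bsp_phys_id = 0x0100;        /* plays the role of hard_smp_processor_id() */
        int node_cpuid_phys[DEMO_NR_CPUS];
        int cpu, i = 1;

        node_cpuid_phys[0] = bsp_phys_id;    /* boot CPU already recorded */
        for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
            if (cpu_phys_id[cpu] != bsp_phys_id)
                node_cpuid_phys[i++] = cpu_phys_id[cpu];

        for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
            printf("node_cpuid[%d].phys_id = 0x%04x\n", cpu, node_cpuid_phys[cpu]);
        return 0;
    }
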
+/*
+ * PCI Interrupt Routing
+ */
-/* --------------------------------------------------------------------------
- PCI Interrupt Routing
- -------------------------------------------------------------------------- */
#ifdef CONFIG_PCI
-
int __init
acpi_get_prt (struct pci_vector_struct **vectors, int *count)
{
@@ -609,7 +797,8 @@
*count = acpi_prt.count;
return 0;
}
-#endif
+#endif /* CONFIG_PCI */
+
/* Assume IA64 always use I/O SAPIC */
int __init