Diffstat (limited to 'arch/ia64/kernel/acpi.c')
-rw-r--r--   arch/ia64/kernel/acpi.c   292
1 file changed, 143 insertions(+), 149 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d825f3..615ef81def4 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -44,26 +44,19 @@
 #include <linux/efi.h>
 #include <linux/mmzone.h>
 #include <linux/nodemask.h>
+#include <linux/slab.h>
+#include <acpi/processor.h>
 #include <asm/io.h>
 #include <asm/iosapic.h>
 #include <asm/machvec.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>

-#define BAD_MADT_ENTRY(entry, end) (                                        \
-        (!entry) || (unsigned long)entry + sizeof(*entry) > end ||          \
-        ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX "ACPI: "

-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
-void (*pm_power_off) (void);
-EXPORT_SYMBOL(pm_power_off);
-
+int acpi_lapic;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;

@@ -81,16 +74,17 @@ static unsigned long __init acpi_find_rsdp(void)
                        "v1.0/r0.71 tables no longer supported\n");
         return rsdp_phys;
 }
-#endif

 const char __init *
 acpi_get_sysname(void)
 {
-#ifdef CONFIG_IA64_GENERIC
         unsigned long rsdp_phys;
         struct acpi_table_rsdp *rsdp;
         struct acpi_table_xsdt *xsdt;
         struct acpi_table_header *hdr;
+#ifdef CONFIG_INTEL_IOMMU
+        u64 i, nentries;
+#endif

         rsdp_phys = acpi_find_rsdp();
         if (!rsdp_phys) {
@@ -117,28 +111,27 @@ acpi_get_sysname(void)
         if (!strcmp(hdr->oem_id, "HP")) {
                 return "hpzx1";
         } else if (!strcmp(hdr->oem_id, "SGI")) {
-                return "sn2";
+                if (!strcmp(hdr->oem_table_id + 4, "UV"))
+                        return "uv";
+                else
+                        return "sn2";
         }

-        return "dig";
-#else
-# if defined (CONFIG_IA64_HP_SIM)
-        return "hpsim";
-# elif defined (CONFIG_IA64_HP_ZX1)
-        return "hpzx1";
-# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
-        return "hpzx1_swiotlb";
-# elif defined (CONFIG_IA64_SGI_SN2)
-        return "sn2";
-# elif defined (CONFIG_IA64_DIG)
-        return "dig";
-# else
-# error Unknown platform. Fix acpi.c.
-# endif
+#ifdef CONFIG_INTEL_IOMMU
+        /* Look for Intel IOMMU */
+        nentries = (hdr->length - sizeof(*hdr)) /
+            sizeof(xsdt->table_offset_entry[0]);
+        for (i = 0; i < nentries; i++) {
+                hdr = __va(xsdt->table_offset_entry[i]);
+                if (strncmp(hdr->signature, ACPI_SIG_DMAR,
+                        sizeof(ACPI_SIG_DMAR) - 1) == 0)
+                        return "dig_vtd";
+        }
 #endif
-}

-#ifdef CONFIG_ACPI
+        return "dig";
+}
+#endif /* CONFIG_IA64_GENERIC */

 #define ACPI_MAX_PLATFORM_INTERRUPTS   256

@@ -171,11 +164,14 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
         return __va(phys_addr);
 }

+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                              Boot-time Table Parsing
    -------------------------------------------------------------------------- */

-static int total_cpus __initdata;
 static int available_cpus __initdata;
 struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;

@@ -345,11 +341,11 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,

         iosapic_override_isa_irq(p->source_irq, p->global_irq,
                                  ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
-                                  ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
-                                 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+                                  ACPI_MADT_POLARITY_ACTIVE_LOW) ?
+                                 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH,
                                  ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
-                                  ACPI_MADT_TRIGGER_EDGE) ?
-                                 IOSAPIC_EDGE : IOSAPIC_LEVEL);
+                                  ACPI_MADT_TRIGGER_LEVEL) ?
+                                 IOSAPIC_LEVEL : IOSAPIC_EDGE);
         return 0;
 }

@@ -419,27 +415,30 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)

 static int __initdata srat_num_cpus;   /* number of cpus */
-static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
+static u32 pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)       (set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)      (test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;

-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
         int pxm;

         pxm = pa->proximity_domain_lo;
-        if (ia64_platform_is("sn2"))
+        if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
                 pxm += pa->proximity_domain_hi[0] << 8;
         return pxm;
 }

-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
 {
         int pxm;

         pxm = ma->proximity_domain;
-        if (!ia64_platform_is("sn2"))
+        if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
                 pxm &= 0xff;

         return pxm;

@@ -459,7 +458,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
                 printk(KERN_ERR
                        "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
                        len, slit->header.length);
-                memset(numa_slit, 10, sizeof(numa_slit));
                 return;
         }
         slit_table = slit;

@@ -473,6 +471,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
         if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
                 return;

+        if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
+                printk_once(KERN_WARNING
+                            "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
+                            ARRAY_SIZE(node_cpuid));
+                return;
+        }
         pxm = get_processor_proximity_domain(pa);

         /* record this node in proximity bitmap */

@@ -482,10 +486,11 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
             (pa->apic_id << 8) | (pa->local_sapic_eid);
         /* nid should be overridden as logical node id later */
         node_cpuid[srat_num_cpus].nid = pxm;
+        cpu_set(srat_num_cpus, early_cpu_possible_map);
         srat_num_cpus++;
 }

-void __init
+int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
         unsigned long paddr, size;

@@ -500,7 +505,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)

         /* Ignore disabled entries */
         if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
-                return;
+                return -1;

         /* record this node in proximity bitmap */
         pxm_bit_set(pxm);

@@ -519,6 +524,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
         p->size = size;
         p->nid = pxm;
         num_node_memblks++;
+        return 0;
 }

 void __init acpi_numa_arch_fixup(void)

@@ -559,7 +565,7 @@ void __init acpi_numa_arch_fixup(void)
         }

         /* set logical node id in cpu structure */
-        for (i = 0; i < srat_num_cpus; i++)
+        for_each_possible_early_cpu(i)
                 node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

         printk(KERN_INFO "Number of logical nodes in system = %d\n",

@@ -567,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
         printk(KERN_INFO "Number of memory chunks in system = %d\n",
                num_node_memblks);

-        if (!slit_table)
+        if (!slit_table) {
+                for (i = 0; i < MAX_NUMNODES; i++)
+                        for (j = 0; j < MAX_NUMNODES; j++)
+                                node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+                                                        REMOTE_DISTANCE;
                 return;
+        }
+
         memset(numa_slit, -1, sizeof(numa_slit));
         for (i = 0; i < slit_table->locality_count; i++) {
                 if (!pxm_bit_test(i))

@@ -598,7 +610,7 @@ void __init acpi_numa_arch_fixup(void)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
 {
         if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                 return gsi;

@@ -614,14 +626,19 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
                                       ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
                                      IOSAPIC_LEVEL);
 }
+EXPORT_SYMBOL_GPL(acpi_register_gsi);

 void acpi_unregister_gsi(u32 gsi)
 {
         if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                 return;

+        if (has_8259 && gsi < 16)
+                return;
+
         iosapic_unregister_intr(gsi);
 }
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);

 static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {

@@ -637,7 +654,46 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)

         fadt = (struct acpi_table_fadt *)fadt_header;

-        acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+        acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
+                          ACPI_ACTIVE_LOW);
+        return 0;
+}
+
+int __init early_acpi_boot_init(void)
+{
+        int ret;
+
+        /*
+         * do a partial walk of MADT to determine how many CPUs
+         * we have including offline CPUs
+         */
+        if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+                printk(KERN_ERR PREFIX "Can't find MADT\n");
+                return 0;
+        }
+
+        ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+                acpi_parse_lsapic, NR_CPUS);
+        if (ret < 1)
+                printk(KERN_ERR PREFIX
+                       "Error parsing MADT - no LAPIC entries\n");
+        else
+                acpi_lapic = 1;
+
+#ifdef CONFIG_SMP
+        if (available_cpus == 0) {
+                printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+                printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+                smp_boot_data.cpu_phys_id[available_cpus] =
+                    hard_smp_processor_id();
+                available_cpus = 1;     /* We've got at least one of these, no? */
+        }
+        smp_boot_data.cpu_count = available_cpus;
+#endif
+        /* Make boot-up look pretty */
+        printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+               total_cpus);
+
         return 0;
 }

@@ -664,11 +720,6 @@ int __init acpi_boot_init(void)
                 printk(KERN_ERR PREFIX
                        "Error parsing LAPIC address override entry\n");

-        if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
-            < 1)
-                printk(KERN_ERR PREFIX
-                       "Error parsing MADT - no LAPIC entries\n");
-
         if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
                 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

@@ -708,18 +759,8 @@ int __init acpi_boot_init(void)
         if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
                 printk(KERN_ERR PREFIX "Can't find FADT\n");

+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-        if (available_cpus == 0) {
-                printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-                printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-                smp_boot_data.cpu_phys_id[available_cpus] =
-                    hard_smp_processor_id();
-                available_cpus = 1;     /* We've got at least one of these, no? */
-        }
-        smp_boot_data.cpu_count = available_cpus;
-
-        smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
         if (srat_num_cpus == 0) {
                 int cpu, i = 1;
                 for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)

@@ -728,14 +769,9 @@ int __init acpi_boot_init(void)
                                 node_cpuid[i++].phys_id =
                                     smp_boot_data.cpu_phys_id[cpu];
         }
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
         build_cpu_to_node_map();
 #endif
-        /* Make boot-up look pretty */
-        printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-               total_cpus);
         return 0;
 }

@@ -754,18 +790,21 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
         return 0;
 }

+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+        if (isa_irq >= 16)
+                return -1;
+        *gsi = isa_irq;
+        return 0;
+}
+
 /*
  *  ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
-        int pxm_id;
-        int nid;
-
-        pxm_id = acpi_get_pxm(handle);
         /*
          * We don't have cpu-only-node hotadd. But if the system equips
          * SRAT table, pxm is already found and node is ready.

@@ -773,11 +812,10 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
          * This code here is for the system which doesn't have full SRAT
          * table for possible cpus.
          */
-        nid = acpi_map_pxm_to_node(pxm_id);
         node_cpuid[cpu].phys_id = physid;
-        node_cpuid[cpu].nid = nid;
+        node_cpuid[cpu].nid = acpi_get_node(handle);
 #endif
-        return (0);
+        return 0;
 }

 int additional_cpus __initdata = -1;

@@ -793,11 +831,11 @@ static __init int setup_additional_cpus(char *s)
 early_param("additional_cpus", setup_additional_cpus);

 /*
- * cpu_possible_map should be static, it cannot change as CPUs
+ * cpu_possible_mask should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj

@@ -823,72 +861,48 @@ __init void prefill_possible_map(void)

         possible = available_cpus + additional_cpus;

-        if (possible > NR_CPUS)
-                possible = NR_CPUS;
+        if (possible > nr_cpu_ids)
+                possible = nr_cpu_ids;

         printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                 possible, max((possible - available_cpus), 0));

         for (i = 0; i < possible; i++)
-                cpu_set(i, cpu_possible_map);
+                set_cpu_possible(i, true);
 }

-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-        union acpi_object *obj;
-        struct acpi_madt_local_sapic *lsapic;
         cpumask_t tmp_map;
-        long physid;
         int cpu;

-        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-                return -EINVAL;
-
-        if (!buffer.length || !buffer.pointer)
-                return -EINVAL;
-
-        obj = buffer.pointer;
-        if (obj->type != ACPI_TYPE_BUFFER)
-        {
-                kfree(buffer.pointer);
-                return -EINVAL;
-        }
-
-        lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-        if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-            (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-                kfree(buffer.pointer);
-                return -EINVAL;
-        }
-
-        physid = ((lsapic->id << 8) | (lsapic->eid));
-
-        kfree(buffer.pointer);
-        buffer.length = ACPI_ALLOCATE_BUFFER;
-        buffer.pointer = NULL;
-
-        cpus_complement(tmp_map, cpu_present_map);
-        cpu = first_cpu(tmp_map);
-        if (cpu >= NR_CPUS)
+        cpumask_complement(&tmp_map, cpu_present_mask);
+        cpu = cpumask_first(&tmp_map);
+        if (cpu >= nr_cpu_ids)
                 return -EINVAL;

         acpi_map_cpu2node(handle, cpu, physid);

-        cpu_set(cpu, cpu_present_map);
+        set_cpu_present(cpu, true);
         ia64_cpu_to_sapicid[cpu] = physid;

+        acpi_processor_set_pdc(handle);
+
         *pcpu = cpu;
         return (0);
 }

+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+{
+        return _acpi_map_lsapic(handle, physid, pcpu);
+}
 EXPORT_SYMBOL(acpi_map_lsapic);

 int acpi_unmap_lsapic(int cpu)
 {
         ia64_cpu_to_sapicid[cpu] = -1;
-        cpu_clear(cpu, cpu_present_map);
+        set_cpu_present(cpu, false);

 #ifdef CONFIG_ACPI_NUMA
         /* NUMA specific cleanup's */

@@ -901,14 +915,14 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */

 #ifdef CONFIG_ACPI_NUMA
-static acpi_status __devinit
-acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
+static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
+                                    void *context, void **ret)
 {
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
         union acpi_object *obj;
         struct acpi_madt_io_sapic *iosapic;
         unsigned int gsi_base;
-        int pxm, node;
+        int node;

         /* Only care about objects w/ a method that returns the MADT */
         if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))

@@ -935,18 +949,10 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)

         kfree(buffer.pointer);

-        /*
-         * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
-         * us which node to associate this with.
-         */
-        pxm = acpi_get_pxm(handle);
-        if (pxm < 0)
-                return AE_OK;
-
-        node = pxm_to_node(pxm);
-
-        if (node >= MAX_NUMNODES || !node_online(node) ||
-            cpus_empty(node_to_cpumask(node)))
+        /* OK, it's an IOSAPIC MADT entry; associate it with a node */
+        node = acpi_get_node(handle);
+        if (node == NUMA_NO_NODE || !node_online(node) ||
+            cpumask_empty(cpumask_of_node(node)))
                 return AE_OK;

         /* We know a gsi to node mapping! */

@@ -964,7 +970,7 @@ acpi_map_iosapics (void)

 fs_initcall(acpi_map_iosapics);
 #endif /* CONFIG_ACPI_NUMA */

-int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
+int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
         int err;

@@ -988,20 +994,8 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 EXPORT_SYMBOL(acpi_unregister_ioapic);

 /*
- * acpi_save_state_mem() - save kernel state
+ * acpi_suspend_lowlevel() - save kernel state and suspend.
  *
  * TBD when when IA64 starts to support suspend...
  */
-int acpi_save_state_mem(void) { return 0; }
-
-/*
- * acpi_restore_state()
- */
-void acpi_restore_state_mem(void) {}
-
-/*
- * do_suspend_lowlevel()
- */
-void do_suspend_lowlevel(void) {}
-
-#endif /* CONFIG_ACPI */
+int acpi_suspend_lowlevel(void) { return 0; }
