Diffstat (limited to 'arch/ia64/kernel')
39 files changed, 497 insertions, 1288 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index d959c84904b..20678a9ed11 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
-obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
 obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 335eb07480f..615ef81def4 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -53,15 +53,10 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
-
-#define BAD_MADT_ENTRY(entry, end) ( \
-    (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-    ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX "ACPI: "
 
-u32 acpi_rsdt_forced;
+int acpi_lapic;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
@@ -120,8 +115,6 @@ acpi_get_sysname(void)
             return "uv";
         else
             return "sn2";
-    } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
-        return "xen";
     }
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -684,6 +677,8 @@ int __init early_acpi_boot_init(void)
     if (ret < 1)
         printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
+    else
+        acpi_lapic = 1;
 
 #ifdef CONFIG_SMP
     if (available_cpus == 0) {
@@ -807,14 +802,9 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  *  ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static __cpuinit
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
-    int pxm_id;
-    int nid;
-
-    pxm_id = acpi_get_pxm(handle);
     /*
      * We don't have cpu-only-node hotadd. But if the system equips
      * SRAT table, pxm is already found and node is ready.
      * This code here is for the system which doesn't have full SRAT
      * table for possible cpus.
      */
-    nid = acpi_map_pxm_to_node(pxm_id);
     node_cpuid[cpu].phys_id = physid;
-    node_cpuid[cpu].nid = nid;
+    node_cpuid[cpu].nid = acpi_get_node(handle);
 #endif
-    return (0);
+    return 0;
 }
 
 int additional_cpus __initdata = -1;
@@ -882,40 +871,10 @@ __init void prefill_possible_map(void)
         set_cpu_possible(i, true);
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-    struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-    union acpi_object *obj;
-    struct acpi_madt_local_sapic *lsapic;
     cpumask_t tmp_map;
-    int cpu, physid;
-
-    if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-        return -EINVAL;
-
-    if (!buffer.length || !buffer.pointer)
-        return -EINVAL;
-
-    obj = buffer.pointer;
-    if (obj->type != ACPI_TYPE_BUFFER)
-    {
-        kfree(buffer.pointer);
-        return -EINVAL;
-    }
-
-    lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-    if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-        (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-        kfree(buffer.pointer);
-        return -EINVAL;
-    }
-
-    physid = ((lsapic->id << 8) | (lsapic->eid));
-
-    kfree(buffer.pointer);
-    buffer.length = ACPI_ALLOCATE_BUFFER;
-    buffer.pointer = NULL;
+    int cpu;
 
     cpumask_complement(&tmp_map, cpu_present_mask);
     cpu = cpumask_first(&tmp_map);
@@ -934,9 +893,9 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-    return _acpi_map_lsapic(handle, pcpu);
+    return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
@@ -963,7 +922,7 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
     union acpi_object *obj;
     struct acpi_madt_io_sapic *iosapic;
     unsigned int gsi_base;
-    int pxm, node;
+    int node;
 
     /* Only care about objects w/ a method that returns the MADT */
     if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -990,17 +949,9 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
 
     kfree(buffer.pointer);
 
-    /*
-     * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
-     * us which node to associate this with.
-     */
-    pxm = acpi_get_pxm(handle);
-    if (pxm < 0)
-        return AE_OK;
-
-    node = pxm_to_node(pxm);
-
-    if (node >= MAX_NUMNODES || !node_online(node) ||
+    /* OK, it's an IOSAPIC MADT entry; associate it with a node */
+    node = acpi_get_node(handle);
+    if (node == NUMA_NO_NODE || !node_online(node) ||
         cpumask_empty(cpumask_of_node(node)))
         return AE_OK;
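The acpi.c hunks above collapse the old two-step proximity-domain lookup (acpi_get_pxm() followed by acpi_map_pxm_to_node() or pxm_to_node()) into a single acpi_get_node() call, which returns NUMA_NO_NODE when no mapping exists. A minimal before/after sketch of the pattern, with a hypothetical helper name:

#include <linux/acpi.h>
#include <linux/numa.h>

/* Old style: fetch the proximity domain, then map it to a node. */
static int node_for_handle_old(acpi_handle handle)
{
	int pxm = acpi_get_pxm(handle);

	if (pxm < 0)
		return NUMA_NO_NODE;
	return pxm_to_node(pxm);
}

/* New style: one call, with NUMA_NO_NODE as the "no mapping" value. */
static int node_for_handle_new(acpi_handle handle)
{
	return acpi_get_node(handle);
}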
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 46c9e300731..60ef83e6db7 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,9 +16,6 @@
 #include <asm/sigcontext.h>
 #include <asm/mca.h>
 
-#include <asm/xen/interface.h>
-#include <asm/xen/hypervisor.h>
-
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
@@ -290,33 +287,4 @@ void foo(void)
     DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
         offsetof (struct itc_jitter_data_t, itc_lastcycle));
 
-#ifdef CONFIG_XEN
-    BLANK();
-
-    DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
-    DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
-    DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
-
-    DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
-    DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
-    DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
-    DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
-    DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
-    DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
-    DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
-    DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
-    DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
-    DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
-    DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
-    DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
-    DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
-    DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
-    DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
-    DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
-    DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
-    DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
-    DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
-#endif /* CONFIG_XEN */
 }
diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig
deleted file mode 100644
index 2d9d5279b98..00000000000
--- a/arch/ia64/kernel/cpufreq/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#
-# CPU Frequency scaling
-#
-
-menu "CPU Frequency scaling"
-
-source "drivers/cpufreq/Kconfig"
-
-if CPU_FREQ
-
-comment "CPUFreq processor drivers"
-
-config IA64_ACPI_CPUFREQ
-    tristate "ACPI Processor P-States driver"
-    select CPU_FREQ_TABLE
-    depends on ACPI_PROCESSOR
-    help
-    This driver adds a CPUFreq driver which utilizes the ACPI
-    Processor Performance States.
-
-    For details, take a look at <file:Documentation/cpu-freq/>.
-
-    If in doubt, say N.
-
-endif   # CPU_FREQ
-
-endmenu
-
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile
deleted file mode 100644
index 4838f2a57c7..00000000000
--- a/arch/ia64/kernel/cpufreq/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
-
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
deleted file mode 100644
index f09b174244d..00000000000
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
- * This file provides the ACPI based P-state support. This
- * module works with generic cpufreq infrastructure. Most of
- * the code is based on i386 version
- * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
- *
- * Copyright (C) 2005 Intel Corp
- *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pal.h>
-
-#include <linux/acpi.h>
-#include <acpi/processor.h>
-
-MODULE_AUTHOR("Venkatesh Pallipadi");
-MODULE_DESCRIPTION("ACPI Processor P-States Driver");
-MODULE_LICENSE("GPL");
-
-
-struct cpufreq_acpi_io {
-    struct acpi_processor_performance   acpi_data;
-    struct cpufreq_frequency_table      *freq_table;
-    unsigned int                        resume;
-};
-
-static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
-
-static struct cpufreq_driver acpi_cpufreq_driver;
-
-
-static int
-processor_set_pstate (
-    u32 value)
-{
-    s64 retval;
-
-    pr_debug("processor_set_pstate\n");
-
-    retval = ia64_pal_set_pstate((u64)value);
-
-    if (retval) {
-        pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
-            value, retval);
-        return -ENODEV;
-    }
-    return (int)retval;
-}
-
-
-static int
-processor_get_pstate (
-    u32 *value)
-{
-    u64 pstate_index = 0;
-    s64 retval;
-
-    pr_debug("processor_get_pstate\n");
-
-    retval = ia64_pal_get_pstate(&pstate_index,
-                                 PAL_GET_PSTATE_TYPE_INSTANT);
-    *value = (u32) pstate_index;
-
-    if (retval)
-        pr_debug("Failed to get current freq with "
-            "error 0x%lx, idx 0x%x\n", retval, *value);
-
-    return (int)retval;
-}
-
-
-/* To be used only after data->acpi_data is initialized */
-static unsigned
-extract_clock (
-    struct cpufreq_acpi_io *data,
-    unsigned value,
-    unsigned int cpu)
-{
-    unsigned long i;
-
-    pr_debug("extract_clock\n");
-
-    for (i = 0; i < data->acpi_data.state_count; i++) {
-        if (value == data->acpi_data.states[i].status)
-            return data->acpi_data.states[i].core_frequency;
-    }
-    return data->acpi_data.states[i-1].core_frequency;
-}
-
-
-static unsigned int
-processor_get_freq (
-    struct cpufreq_acpi_io *data,
-    unsigned int cpu)
-{
-    int ret = 0;
-    u32 value = 0;
-    cpumask_t saved_mask;
-    unsigned long clock_freq;
-
-    pr_debug("processor_get_freq\n");
-
-    saved_mask = current->cpus_allowed;
-    set_cpus_allowed_ptr(current, cpumask_of(cpu));
-    if (smp_processor_id() != cpu)
-        goto migrate_end;
-
-    /* processor_get_pstate gets the instantaneous frequency */
-    ret = processor_get_pstate(&value);
-
-    if (ret) {
-        set_cpus_allowed_ptr(current, &saved_mask);
-        printk(KERN_WARNING "get performance failed with error %d\n",
-               ret);
-        ret = 0;
-        goto migrate_end;
-    }
-    clock_freq = extract_clock(data, value, cpu);
-    ret = (clock_freq*1000);
-
-migrate_end:
-    set_cpus_allowed_ptr(current, &saved_mask);
-    return ret;
-}
-
-
-static int
-processor_set_freq (
-    struct cpufreq_acpi_io *data,
-    unsigned int cpu,
-    int state)
-{
-    int ret = 0;
-    u32 value = 0;
-    struct cpufreq_freqs cpufreq_freqs;
-    cpumask_t saved_mask;
-    int retval;
-
-    pr_debug("processor_set_freq\n");
-
-    saved_mask = current->cpus_allowed;
-    set_cpus_allowed_ptr(current, cpumask_of(cpu));
-    if (smp_processor_id() != cpu) {
-        retval = -EAGAIN;
-        goto migrate_end;
-    }
-
-    if (state == data->acpi_data.state) {
-        if (unlikely(data->resume)) {
-            pr_debug("Called after resume, resetting to P%d\n", state);
-            data->resume = 0;
-        } else {
-            pr_debug("Already at target state (P%d)\n", state);
-            retval = 0;
-            goto migrate_end;
-        }
-    }
-
-    pr_debug("Transitioning from P%d to P%d\n",
-        data->acpi_data.state, state);
-
-    /* cpufreq frequency struct */
-    cpufreq_freqs.cpu = cpu;
-    cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
-    cpufreq_freqs.new = data->freq_table[state].frequency;
-
-    /* notify cpufreq */
-    cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
-
-    /*
-     * First we write the target state's 'control' value to the
-     * control_register.
-     */
-
-    value = (u32) data->acpi_data.states[state].control;
-
-    pr_debug("Transitioning to state: 0x%08x\n", value);
-
-    ret = processor_set_pstate(value);
-    if (ret) {
-        unsigned int tmp = cpufreq_freqs.new;
-        cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-        cpufreq_freqs.new = cpufreq_freqs.old;
-        cpufreq_freqs.old = tmp;
-        cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
-        cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-        printk(KERN_WARNING "Transition failed with error %d\n", ret);
-        retval = -ENODEV;
-        goto migrate_end;
-    }
-
-    cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
-    data->acpi_data.state = state;
-
-    retval = 0;
-
-migrate_end:
-    set_cpus_allowed_ptr(current, &saved_mask);
-    return (retval);
-}
-
-
-static unsigned int
-acpi_cpufreq_get (
-    unsigned int cpu)
-{
-    struct cpufreq_acpi_io *data = acpi_io_data[cpu];
-
-    pr_debug("acpi_cpufreq_get\n");
-
-    return processor_get_freq(data, cpu);
-}
-
-
-static int
-acpi_cpufreq_target (
-    struct cpufreq_policy *policy,
-    unsigned int target_freq,
-    unsigned int relation)
-{
-    struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-    unsigned int next_state = 0;
-    unsigned int result = 0;
-
-    pr_debug("acpi_cpufreq_setpolicy\n");
-
-    result = cpufreq_frequency_table_target(policy,
-            data->freq_table, target_freq, relation, &next_state);
-    if (result)
-        return (result);
-
-    result = processor_set_freq(data, policy->cpu, next_state);
-
-    return (result);
-}
-
-
-static int
-acpi_cpufreq_verify (
-    struct cpufreq_policy *policy)
-{
-    unsigned int result = 0;
-    struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
-    pr_debug("acpi_cpufreq_verify\n");
-
-    result = cpufreq_frequency_table_verify(policy,
-            data->freq_table);
-
-    return (result);
-}
-
-
-static int
-acpi_cpufreq_cpu_init (
-    struct cpufreq_policy *policy)
-{
-    unsigned int i;
-    unsigned int cpu = policy->cpu;
-    struct cpufreq_acpi_io *data;
-    unsigned int result = 0;
-
-    pr_debug("acpi_cpufreq_cpu_init\n");
-
-    data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
-    if (!data)
-        return (-ENOMEM);
-
-    acpi_io_data[cpu] = data;
-
-    result = acpi_processor_register_performance(&data->acpi_data, cpu);
-
-    if (result)
-        goto err_free;
-
-    /* capability check */
-    if (data->acpi_data.state_count <= 1) {
-        pr_debug("No P-States\n");
-        result = -ENODEV;
-        goto err_unreg;
-    }
-
-    if ((data->acpi_data.control_register.space_id !=
-        ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-        (data->acpi_data.status_register.space_id !=
-        ACPI_ADR_SPACE_FIXED_HARDWARE)) {
-        pr_debug("Unsupported address space [%d, %d]\n",
-            (u32) (data->acpi_data.control_register.space_id),
-            (u32) (data->acpi_data.status_register.space_id));
-        result = -ENODEV;
-        goto err_unreg;
-    }
-
-    /* alloc freq_table */
-    data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
-                               (data->acpi_data.state_count + 1),
-                               GFP_KERNEL);
-    if (!data->freq_table) {
-        result = -ENOMEM;
-        goto err_unreg;
-    }
-
-    /* detect transition latency */
-    policy->cpuinfo.transition_latency = 0;
-    for (i=0; i<data->acpi_data.state_count; i++) {
-        if ((data->acpi_data.states[i].transition_latency * 1000) >
-            policy->cpuinfo.transition_latency) {
-            policy->cpuinfo.transition_latency =
-                data->acpi_data.states[i].transition_latency * 1000;
-        }
-    }
-    policy->cur = processor_get_freq(data, policy->cpu);
-
-    /* table init */
-    for (i = 0; i <= data->acpi_data.state_count; i++)
-    {
-        data->freq_table[i].index = i;
-        if (i < data->acpi_data.state_count) {
-            data->freq_table[i].frequency =
-                data->acpi_data.states[i].core_frequency * 1000;
-        } else {
-            data->freq_table[i].frequency = CPUFREQ_TABLE_END;
-        }
-    }
-
-    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
-    if (result) {
-        goto err_freqfree;
-    }
-
-    /* notify BIOS that we exist */
-    acpi_processor_notify_smm(THIS_MODULE);
-
-    printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
-           "activated.\n", cpu);
-
-    for (i = 0; i < data->acpi_data.state_count; i++)
-        pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
-            (i == data->acpi_data.state?'*':' '), i,
-            (u32) data->acpi_data.states[i].core_frequency,
-            (u32) data->acpi_data.states[i].power,
-            (u32) data->acpi_data.states[i].transition_latency,
-            (u32) data->acpi_data.states[i].bus_master_latency,
-            (u32) data->acpi_data.states[i].status,
-            (u32) data->acpi_data.states[i].control);
-
-    cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
-    /* the first call to ->target() should result in us actually
-     * writing something to the appropriate registers. */
-    data->resume = 1;
-
-    return (result);
-
- err_freqfree:
-    kfree(data->freq_table);
- err_unreg:
-    acpi_processor_unregister_performance(&data->acpi_data, cpu);
- err_free:
-    kfree(data);
-    acpi_io_data[cpu] = NULL;
-
-    return (result);
-}
-
-
-static int
-acpi_cpufreq_cpu_exit (
-    struct cpufreq_policy *policy)
-{
-    struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
-    pr_debug("acpi_cpufreq_cpu_exit\n");
-
-    if (data) {
-        cpufreq_frequency_table_put_attr(policy->cpu);
-        acpi_io_data[policy->cpu] = NULL;
-        acpi_processor_unregister_performance(&data->acpi_data,
-                                              policy->cpu);
-        kfree(data);
-    }
-
-    return (0);
-}
-
-
-static struct freq_attr* acpi_cpufreq_attr[] = {
-    &cpufreq_freq_attr_scaling_available_freqs,
-    NULL,
-};
-
-
-static struct cpufreq_driver acpi_cpufreq_driver = {
-    .verify     = acpi_cpufreq_verify,
-    .target     = acpi_cpufreq_target,
-    .get        = acpi_cpufreq_get,
-    .init       = acpi_cpufreq_cpu_init,
-    .exit       = acpi_cpufreq_cpu_exit,
-    .name       = "acpi-cpufreq",
-    .owner      = THIS_MODULE,
-    .attr       = acpi_cpufreq_attr,
-};
-
-
-static int __init
-acpi_cpufreq_init (void)
-{
-    pr_debug("acpi_cpufreq_init\n");
-
-    return cpufreq_register_driver(&acpi_cpufreq_driver);
-}
-
-
-static void __exit
-acpi_cpufreq_exit (void)
-{
-    pr_debug("acpi_cpufreq_exit\n");
-
-    cpufreq_unregister_driver(&acpi_cpufreq_driver);
-    return;
-}
-
-
-late_initcall(acpi_cpufreq_init);
-module_exit(acpi_cpufreq_exit);
-
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index b942f4032d7..2955f359e2a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -237,7 +237,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_ctl_table[] = {
+static struct ctl_table kdump_ctl_table[] = {
     {
         .procname = "kdump_on_init",
         .data = &kdump_on_init,
@@ -255,7 +255,7 @@ static ctl_table kdump_ctl_table[] = {
     { }
 };
 
-static ctl_table sys_table[] = {
+static struct ctl_table sys_table[] = {
     {
         .procname = "kernel",
         .mode = 0555,
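The crash.c hunks drop the legacy ctl_table typedef in favour of spelling out struct ctl_table. A minimal sketch of the same registration pattern, assuming a hypothetical kdump_on_init knob and the register_sysctl_table() interface of this era:

#include <linux/sysctl.h>

static int kdump_on_init = 1;	/* hypothetical knob, mirroring crash.c */

static struct ctl_table kdump_ctl_table[] = {
	{
		.procname	= "kdump_on_init",
		.data		= &kdump_on_init,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* zeroed sentinel terminates the table */
};

static struct ctl_table sys_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= kdump_ctl_table,	/* nests under /proc/sys/kernel */
	},
	{ }
};

static int __init kdump_sysctl_init(void)
{
	return register_sysctl_table(sys_table) ? 0 : -ENOMEM;
}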
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f034563aeae..741b99c1a0b 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -44,10 +44,15 @@
 
 #define EFI_DEBUG 0
 
+static __initdata unsigned long palo_phys;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+    {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, "PALO", &palo_phys},
+    {NULL_GUID, NULL, 0},
+};
+
 extern efi_status_t efi_call_phys (void *, ...);
 
-struct efi efi;
-EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
 static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
@@ -423,9 +428,9 @@ static u8 __init palo_checksum(u8 *buffer, u32 length)
  * Parse and handle PALO table which is published at:
  * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
  */
-static void __init handle_palo(unsigned long palo_phys)
+static void __init handle_palo(unsigned long phys_addr)
 {
-    struct palo_table *palo = __va(palo_phys);
+    struct palo_table *palo = __va(phys_addr);
     u8 checksum;
 
     if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
@@ -467,12 +472,13 @@
 void __init
 efi_init (void)
 {
     void *efi_map_start, *efi_map_end;
-    efi_config_table_t *config_tables;
     efi_char16_t *c16;
     u64 efi_desc_size;
     char *cp, vendor[100] = "unknown";
     int i;
-    unsigned long palo_phys;
+
+    set_bit(EFI_BOOT, &efi.flags);
+    set_bit(EFI_64BIT, &efi.flags);
 
     /*
      * It's too early to be able to use the standard kernel command line
@@ -514,8 +520,6 @@ efi_init (void)
            efi.systab->hdr.revision >> 16,
            efi.systab->hdr.revision & 0xffff);
 
-    config_tables = __va(efi.systab->tables);
-
     /* Show what we know for posterity */
     c16 = __va(efi.systab->fw_vendor);
     if (c16) {
@@ -528,43 +532,12 @@ efi_init (void)
            efi.systab->hdr.revision >> 16,
            efi.systab->hdr.revision & 0xffff, vendor);
 
-    efi.mps        = EFI_INVALID_TABLE_ADDR;
-    efi.acpi       = EFI_INVALID_TABLE_ADDR;
-    efi.acpi20     = EFI_INVALID_TABLE_ADDR;
-    efi.smbios     = EFI_INVALID_TABLE_ADDR;
-    efi.sal_systab = EFI_INVALID_TABLE_ADDR;
-    efi.boot_info  = EFI_INVALID_TABLE_ADDR;
-    efi.hcdp       = EFI_INVALID_TABLE_ADDR;
-    efi.uga        = EFI_INVALID_TABLE_ADDR;
+    set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
     palo_phys      = EFI_INVALID_TABLE_ADDR;
 
-    for (i = 0; i < (int) efi.systab->nr_tables; i++) {
-        if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
-            efi.mps = config_tables[i].table;
-            printk(" MPS=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
-            efi.acpi20 = config_tables[i].table;
-            printk(" ACPI 2.0=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
-            efi.acpi = config_tables[i].table;
-            printk(" ACPI=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
-            efi.smbios = config_tables[i].table;
-            printk(" SMBIOS=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
-            efi.sal_systab = config_tables[i].table;
-            printk(" SALsystab=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
-            efi.hcdp = config_tables[i].table;
-            printk(" HCDP=0x%lx", config_tables[i].table);
-        } else if (efi_guidcmp(config_tables[i].guid,
-             PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
-            palo_phys = config_tables[i].table;
-            printk(" PALO=0x%lx", config_tables[i].table);
-        }
-    }
-    printk("\n");
+    if (efi_config_init(arch_tables) != 0)
+        return;
 
     if (palo_phys != EFI_INVALID_TABLE_ADDR)
         handle_palo(palo_phys);
@@ -689,6 +662,8 @@ efi_enter_virtual_mode (void)
         return;
     }
 
+    set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
     /*
      * Now that EFI is in virtual mode, we call the EFI functions more
      * efficiently:
@@ -1116,11 +1091,6 @@ efi_memmap_init(u64 *s, u64 *e)
         if (!is_memory_available(md))
             continue;
 
-#ifdef CONFIG_CRASH_DUMP
-        /* saved_max_pfn should ignore max_addr= command line arg */
-        if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
-            saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
         /*
          * Round ends inward to granule boundaries
         * Give trimmings to uncached allocator
         */
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
index bac1639bc32..04bc8fd5f89 100644
--- a/arch/ia64/kernel/elfcore.c
+++ b/arch/ia64/kernel/elfcore.c
@@ -11,8 +11,7 @@ Elf64_Half elf_core_extra_phdrs(void)
     return GATE_EHDR->e_phnum;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                               unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
     const struct elf_phdr *const gate_phdrs =
         (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -35,15 +34,13 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
             phdr.p_offset += ofs;
         }
         phdr.p_paddr = 0; /* match other core phdrs */
-        *size += sizeof(phdr);
-        if (*size > limit || !dump_write(file, &phdr, sizeof(phdr)))
+        if (!dump_emit(cprm, &phdr, sizeof(phdr)))
             return 0;
     }
     return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-                              unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
     const struct elf_phdr *const gate_phdrs =
         (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -54,8 +51,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size,
             void *addr = (void *)gate_phdrs[i].p_vaddr;
             size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
 
-            *size += memsz;
-            if (*size > limit || !dump_write(file, addr, memsz))
+            if (!dump_emit(cprm, addr, memsz))
                 return 0;
             break;
         }
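The efi.c hunks above retire the open-coded configuration-table scan in favour of the generic efi_config_init(), which walks the EFI system table once and fills in any architecture-specific entries it is handed. A sketch of the arch-table convention, assuming a hypothetical MY_TABLE_GUID that a platform wants to pick up:

#include <linux/efi.h>

static unsigned long my_table_phys = EFI_INVALID_TABLE_ADDR;

/* Hypothetical GUID; a real platform would use one published in a spec. */
#define MY_TABLE_GUID \
	EFI_GUID(0x12345678, 0x1234, 0x1234, \
		 0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78)

static __initdata efi_config_table_type_t my_arch_tables[] = {
	{MY_TABLE_GUID, "MYTBL", &my_table_phys},
	{NULL_GUID, NULL, 0},	/* NULL_GUID sentinel ends the list */
};

static void __init my_efi_init(void)
{
	/* efi_config_init() matches GUIDs and stores the table addresses */
	if (efi_config_init(my_arch_tables) != 0)
		return;

	if (my_table_phys != EFI_INVALID_TABLE_ADDR)
		pr_info("MYTBL at 0x%lx\n", my_table_phys);
}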
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7a53530f22c..ba3d03503e8 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1169,21 +1169,8 @@ skip_rbs_switch:
 .work_pending:
     tbit.z p6,p0=r31,TIF_NEED_RESCHED   // is resched not needed?
 (p6) br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
-    ;;
-(pKStk) st4 [r20]=r21
-#endif
-    SSM_PSR_I(p0, p6, r2)   // enable interrupts
-    br.call.spnt.many rp=schedule
+    br.call.spnt.many rp=preempt_schedule_irq
 .ret9:
     cmp.eq p6,p0=r0,r0  // p6 <- 1 (re-check)
-    RSM_PSR_I(p0, r2, r20)  // disable interrupts
-    ;;
-#ifdef CONFIG_PREEMPT
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-    ;;
-(pKStk) st4 [r20]=r0    // preempt_count() <- 0
-#endif
 (pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
     br.cond.sptk.many .work_processed_kernel
@@ -1786,6 +1773,9 @@ sys_call_table:
     data8 sys_process_vm_writev
     data8 sys_accept4
     data8 sys_finit_module          // 1335
+    data8 sys_sched_setattr
+    data8 sys_sched_getattr
+    data8 sys_renameat2
     .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317a1ec..0c161ed6d18 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -225,17 +225,17 @@ static struct attribute_group err_inject_attr_group = {
     .name = "err_inject"
 };
 
 /* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct device * sys_dev)
+static int err_inject_add_dev(struct device *sys_dev)
 {
     return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
 }
 
-static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
+static int err_inject_remove_dev(struct device *sys_dev)
 {
     sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
     return 0;
 }
 
-static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+static int err_inject_cpu_callback(struct notifier_block *nfb,
         unsigned long action, void *hcpu)
 {
     unsigned int cpu = (unsigned long)hcpu;
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
     return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+static struct notifier_block err_inject_cpu_notifier =
 {
     .notifier_call = err_inject_cpu_callback,
 };
@@ -269,12 +269,17 @@ err_inject_init(void)
 #ifdef ERR_INJ_DEBUG
     printk(KERN_INFO "Enter error injection driver.\n");
 #endif
+
+    cpu_notifier_register_begin();
+
     for_each_online_cpu(i) {
         err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
                 (void *)(long)i);
     }
 
-    register_hotcpu_notifier(&err_inject_cpu_notifier);
+    __register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+    cpu_notifier_register_done();
 
     return 0;
 }
@@ -288,11 +293,17 @@ err_inject_exit(void)
 #ifdef ERR_INJ_DEBUG
     printk(KERN_INFO "Exit error injection driver.\n");
 #endif
+
+    cpu_notifier_register_begin();
+
     for_each_online_cpu(i) {
         sys_dev = get_cpu_device(i);
         sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
     }
-    unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+    __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+    cpu_notifier_register_done();
 }
 
 module_init(err_inject_init);
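err_inject.c now brackets its init-time CPU walk and notifier registration with cpu_notifier_register_begin()/cpu_notifier_register_done(), closing the window where a CPU could come online between the for_each_online_cpu() pass and register_hotcpu_notifier(). The general shape of the fix, as a sketch with a hypothetical my_cpu_callback():

#include <linux/cpu.h>
#include <linux/notifier.h>

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	/* react to CPU_ONLINE/CPU_DEAD etc. for CPU (long)hcpu */
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};

static int __init my_driver_init(void)
{
	int cpu;

	cpu_notifier_register_begin();	/* holds off CPU hotplug */

	for_each_online_cpu(cpu) {
		/* set up state for every CPU that is already online */
		my_cpu_callback(&my_cpu_notifier, CPU_ONLINE,
				(void *)(long)cpu);
	}
	/* the __ variant expects the caller to hold the hotplug lock */
	__register_hotcpu_notifier(&my_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}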
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c4cd45d9774..abc6dee3799 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
     FSYS_RETURN
 END(fsys_getpid)
 
-ENTRY(fsys_getppid)
-    .prologue
-    .altrp b6
-    .body
-    add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
-    ;;
-    ld8 r17=[r17]               // r17 = current->group_leader
-    add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-    ;;
-
-    ld4 r9=[r9]
-    add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
-    ;;
-    and r9=TIF_ALLWORK_MASK,r9
-
-1:  ld8 r18=[r17]               // r18 = current->group_leader->real_parent
-    ;;
-    cmp.ne p8,p0=0,r9
-    add r8=IA64_TASK_TGID_OFFSET,r18    // r8 = &current->group_leader->real_parent->tgid
-    ;;
-
-    /*
-     * The .acq is needed to ensure that the read of tgid has returned its data before
-     * we re-check "real_parent".
-     */
-    ld4.acq r8=[r8]             // r8 = current->group_leader->real_parent->tgid
-#ifdef CONFIG_SMP
-    /*
-     * Re-read current->group_leader->real_parent.
-     */
-    ld8 r19=[r17]               // r19 = current->group_leader->real_parent
-(p8) br.spnt.many fsys_fallback_syscall
-    ;;
-    cmp.ne p6,p0=r18,r19        // did real_parent change?
-    mov r19=0                   // i must not leak kernel bits...
-(p6) br.cond.spnt.few 1b        // yes -> redo the read of tgid and the check
-    ;;
-    mov r17=0                   // i must not leak kernel bits...
-    mov r18=0                   // i must not leak kernel bits...
-#else
-    mov r17=0                   // i must not leak kernel bits...
-    mov r18=0                   // i must not leak kernel bits...
-    mov r19=0                   // i must not leak kernel bits...
-#endif
-    FSYS_RETURN
-END(fsys_getppid)
-
 ENTRY(fsys_set_tid_address)
     .prologue
     .altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
     data8 0             // chown
     data8 0             // lseek        // 1040
     data8 fsys_getpid   // getpid
-    data8 fsys_getppid  // getppid
+    data8 0             // getppid
     data8 0             // mount
     data8 0             // umount
     data8 0             // setuid       // 1045
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index 7fc8c961b1f..3b0c2aa0785 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -198,9 +198,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-    *(unsigned long *)data = 0;
-
     return 0;
 }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d..a4acddad0c7 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -259,7 +259,7 @@ start_ap:
     * Switch into virtual mode:
     */
     movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-          |IA64_PSR_DI|IA64_PSR_AC)
+          |IA64_PSR_DI)
     ;;
     mov cr.ipsr=r16
     movl r17=1f
@@ -416,8 +416,6 @@ start_ap:
 
 default_setup_hook = 0      // Currently nothing needs to be done.
 
-    .weak xen_setup_hook
-
     .global hypervisor_type
 hypervisor_type:
     data8       PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@@ -426,7 +424,6 @@ hypervisor_type:
 
 hypervisor_setup_hooks:
     data8       default_setup_hook
-    data8       xen_setup_hook
 num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
     .previous
 
@@ -1035,7 +1032,7 @@ END(ia64_delay_loop)
 * Return a CPU-local timestamp in nano-seconds.  This timestamp is
 * NOT synchronized across CPUs its return value must never be
 * compared against the values returned on another CPU.  The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
 *
 * The return-value of sched_clock() is NOT supposed to wrap-around.
 * If it did, it would cause some scheduling hiccups (at the worst).
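The removed fsys_getppid fast path above is a nice illustration of a lock-free read: fetch a pointer, read through it with acquire ordering, then re-read the pointer and retry if it changed in between. Roughly the same idea in C, as a sketch using the kernel's current ordering helpers (the real assembly additionally bails to the slow syscall path when work is pending):

#include <linux/compiler.h>

/* toy stand-ins for the task fields the assembly walked */
struct task { struct task *real_parent; int tgid; };

static int read_parent_tgid(struct task *t)
{
	struct task *parent, *recheck;
	int tgid;

	do {
		parent = READ_ONCE(t->real_parent);
		/* acquire: tgid must be read before real_parent is re-checked */
		tgid = smp_load_acquire(&parent->tgid);
		recheck = READ_ONCE(t->real_parent);
	} while (parent != recheck);	/* parent changed underneath us: retry */

	return tgid;
}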
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index ee33c3aaa2f..cd44a57c73b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
- * describeinterrupts.  Now we use "IRQ" only for Linux IRQ's.  ISA IRQ
+ * describe interrupts.  Now we use "IRQ" only for Linux IRQ's.  ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
@@ -735,7 +735,7 @@ iosapic_register_intr (unsigned int gsi,
     rte = find_rte(irq, gsi);
     if(iosapic_intr_info[irq].count == 0) {
         assign_irq_vector(irq);
-        dynamic_irq_init(irq);
+        irq_init_desc(irq);
     } else if (rte->refcnt != NO_REF_RTE) {
         rte->refcnt++;
         goto unlock_iosapic_lock;
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
     return 0;
 }
 
+static int
+iosapic_delete_rte(unsigned int irq, unsigned int gsi)
+{
+    struct iosapic_rte_info *rte, *temp;
+
+    list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
+                             rte_list) {
+        if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
+            if (rte->refcnt)
+                return -EBUSY;
+
+            list_del(&rte->rte_list);
+            kfree(rte);
+            return 0;
+        }
+    }
+
+    return -EINVAL;
+}
+
 int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
 {
     int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
 
 int iosapic_remove(unsigned int gsi_base)
 {
-    int index, err = 0;
+    int i, irq, index, err = 0;
     unsigned long flags;
 
     spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
         goto out;
     }
 
+    for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
+        irq = __gsi_to_irq(i);
+        if (irq < 0)
+            continue;
+
+        err = iosapic_delete_rte(irq, i);
+        if (err)
+            goto out;
+    }
+
     iounmap(iosapic_lists[index].addr);
     iosapic_free(index);
 out:
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613e..f2c41828113 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <asm/mca.h>
+
 /*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
 
 #endif /* CONFIG_SMP */
 
+int __init arch_early_irq_init(void)
+{
+    ia64_mca_irq_init();
+    return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1034884b77d..03ea78ed64a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -93,14 +93,6 @@ static int irq_status[NR_IRQS] = {
     [0 ... NR_IRQS -1] = IRQ_UNUSED
 };
 
-int check_irq_used(int irq)
-{
-    if (irq_status[irq] == IRQ_USED)
-        return 1;
-
-    return -1;
-}
-
 static inline int find_unassigned_irq(void)
 {
     int irq;
@@ -364,7 +356,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_move_irqaction = {
     .handler =  smp_irq_move_cleanup_interrupt,
-    .flags =    IRQF_DISABLED,
     .name =     "irq_move"
 };
 
@@ -391,8 +382,7 @@ void destroy_and_reserve_irq(unsigned int irq)
 {
     unsigned long flags;
 
-    dynamic_irq_cleanup(irq);
-
+    irq_init_desc(irq);
     spin_lock_irqsave(&vector_lock, flags);
     __clear_irq_vector(irq);
     irq_status[irq] = IRQ_RSVD;
@@ -425,13 +415,13 @@ int create_irq(void)
 out:
     spin_unlock_irqrestore(&vector_lock, flags);
     if (irq >= 0)
-        dynamic_irq_init(irq);
+        irq_init_desc(irq);
     return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
-    dynamic_irq_cleanup(irq);
+    irq_init_desc(irq);
     clear_irq_vector(irq);
 }
 
@@ -489,14 +479,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
     ia64_srlz_d();
     while (vector != IA64_SPURIOUS_INT_VECTOR) {
         int irq = local_vector_to_irq(vector);
-        struct irq_desc *desc = irq_to_desc(irq);
 
         if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
             smp_local_flush_tlb();
-            kstat_incr_irqs_this_cpu(irq, desc);
+            kstat_incr_irq_this_cpu(irq);
         } else if (unlikely(IS_RESCHEDULE(vector))) {
             scheduler_ipi();
-            kstat_incr_irqs_this_cpu(irq, desc);
+            kstat_incr_irq_this_cpu(irq);
         } else {
             ia64_setreg(_IA64_REG_CR_TPR, vector);
             ia64_srlz_d();
@@ -549,13 +538,12 @@ void ia64_process_pending_intr(void)
      */
     while (vector != IA64_SPURIOUS_INT_VECTOR) {
         int irq = local_vector_to_irq(vector);
-        struct irq_desc *desc = irq_to_desc(irq);
 
         if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
             smp_local_flush_tlb();
-            kstat_incr_irqs_this_cpu(irq, desc);
+            kstat_incr_irq_this_cpu(irq);
         } else if (unlikely(IS_RESCHEDULE(vector))) {
-            kstat_incr_irqs_this_cpu(irq, desc);
+            kstat_incr_irq_this_cpu(irq);
         } else {
             struct pt_regs *old_regs = set_irq_regs(NULL);
 
@@ -602,7 +590,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 
 static struct irqaction ipi_irqaction = {
     .handler =  handle_IPI,
-    .flags =    IRQF_DISABLED,
     .name =     "IPI"
 };
 
@@ -611,13 +598,11 @@ static struct irqaction ipi_irqaction = {
 */
 static struct irqaction resched_irqaction = {
     .handler =  dummy_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "resched"
 };
 
 static struct irqaction tlb_irqaction = {
     .handler =  dummy_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "tlb_flush"
 };
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 689ffcaa284..18e794a5724 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -58,7 +58,7 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 
-#if 1
+#if 0
 # define PSR_DEFAULT_BITS   psr.ac
 #else
 # define PSR_DEFAULT_BITS   0
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f8280a766a7..074fde49c9e 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -947,7 +947,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
     case KPROBE_HIT_SSDONE:
         /*
         * We increment the nmissed count for accounting,
-        * we can also use npre/npostfault count for accouting
+        * we can also use npre/npostfault count for accounting
         * these specific fault cases.
         */
         kprobes_inc_nmissed_count(cur);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd3904..db7b36bb068 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -217,7 +217,7 @@ void ia64_mca_printk(const char *fmt, ...)
     /* Copy the output into mlogbuf */
     if (oops_in_progress) {
         /* mlogbuf was abandoned, use printk directly instead. */
-        printk(temp_buf);
+        printk("%s", temp_buf);
     } else {
         spin_lock(&mlogbuf_wlock);
         for (p = temp_buf; *p; p++) {
@@ -268,7 +268,7 @@ void ia64_mlogbuf_dump(void)
         }
         *p = '\0';
         if (temp_buf[0])
-            printk(temp_buf);
+            printk("%s", temp_buf);
         mlogbuf_start = index;
 
         mlogbuf_timestamp = 0;
@@ -631,7 +631,7 @@ ia64_mca_register_cpev (int cpev)
 * Outputs
 *   None
 */
-void __cpuinit
+void
 ia64_mca_cmc_vector_setup (void)
 {
     cmcv_reg_t cmcv;
@@ -1772,38 +1772,32 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
 static struct irqaction cmci_irqaction = {
     .handler =  ia64_mca_cmc_int_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
     .handler =  ia64_mca_cmc_int_caller,
-    .flags =    IRQF_DISABLED,
     .name =     "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
     .handler =  ia64_mca_rendez_int_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
     .handler =  ia64_mca_wakeup_int_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
     .handler =  ia64_mca_cpe_int_handler,
-    .flags =    IRQF_DISABLED,
     .name =     "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
     .handler =  ia64_mca_cpe_int_caller,
-    .flags =    IRQF_DISABLED,
     .name =     "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
@@ -1814,7 +1808,7 @@ static struct irqaction mca_cpep_irqaction = {
 * format most of the fields.
 */
 
-static void __cpuinit
+static void
 format_mca_init_stack(void *mca_data, unsigned long offset,
         const char *type, int cpu)
 {
@@ -1844,7 +1838,7 @@ static void * __init_refok mca_bootmem(void)
 }
 
 /* Do per-CPU MCA-related initialization. */
-void __cpuinit
+void
 ia64_mca_cpu_init(void *cpu_data)
 {
     void *pal_vaddr;
@@ -1896,7 +1890,7 @@ ia64_mca_cpu_init(void *cpu_data)
                               PAGE_KERNEL));
 }
 
-static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
+static void ia64_mca_cmc_vector_adjust(void *dummy)
 {
     unsigned long flags;
 
@@ -1906,7 +1900,7 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
     local_irq_restore(flags);
 }
 
-static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+static int mca_cpu_callback(struct notifier_block *nfb,
                       unsigned long action,
                       void *hcpu)
 {
@@ -1922,7 +1916,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
     return NOTIFY_OK;
 }
 
-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+static struct notifier_block mca_cpu_notifier = {
     .notifier_call = mca_cpu_callback
 };
 
@@ -2074,22 +2068,16 @@ ia64_mca_init(void)
     printk(KERN_INFO "MCA related initialization done\n");
 }
 
+
 /*
- * ia64_mca_late_init
- *
- *  Opportunity to setup things that require initialization later
- *  than ia64_mca_init.  Setup a timer to poll for CPEs if the
- *  platform doesn't support an interrupt driven mechanism.
- *
- *  Inputs  :   None
- *  Outputs :   Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
 */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-    if (!mca_init)
-        return 0;
-
     /*
      *  Configure the CMCI/P vector and handler.  Interrupts for CMC are
      *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2096,23 @@ ia64_mca_late_init(void)
     /* Setup the CPEI/P handler */
     register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ *  Opportunity to setup things that require initialization later
+ *  than ia64_mca_init.  Setup a timer to poll for CPEs if the
+ *  platform doesn't support an interrupt driven mechanism.
+ *
+ *  Inputs  :   None
+ *  Outputs :   Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+    if (!mca_init)
+        return 0;
 
     register_hotcpu_notifier(&mca_cpu_notifier);
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 9392e021c93..94f8bf777af 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
 
     /* - 3 - */
     slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-    slidx_pool.buffer = (slidx_list_t *)
+    slidx_pool.buffer =
         kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 
     return slidx_pool.buffer ? 0 : -ENOMEM;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index fb2f1e62287..c430f9198d1 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -17,12 +17,9 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 {
     struct msi_msg msg;
     u32 addr, data;
-    int cpu = first_cpu(*cpu_mask);
+    int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
     unsigned int irq = idata->irq;
 
-    if (!cpu_online(cpu))
-        return -1;
-
     if (irq_prepare_move(irq, cpu))
         return -1;
 
@@ -139,10 +136,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
     unsigned int irq = data->irq;
     struct irq_cfg *cfg = irq_cfg + irq;
     struct msi_msg msg;
-    int cpu = cpumask_first(mask);
-
-    if (!cpu_online(cpu))
-        return -1;
+    int cpu = cpumask_first_and(mask, cpu_online_mask);
 
     if (irq_prepare_move(irq, cpu))
         return -1;
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index ee564575148..f6769cd54bd 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,15 +10,11 @@
 #include <linux/kbuild.h>
 #include <linux/threads.h>
 #include <asm/native/irq.h>
-#include <asm/xen/irq.h>
 
 void foo(void)
 {
     union paravirt_nr_irqs_max {
         char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
-#ifdef CONFIG_XEN
-        char xen_nr_irqs[XEN_NR_IRQS];
-#endif
     };
 
     DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index c93420c9740..d288cde9360 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 EXPORT_SYMBOL(node_to_cpu_mask);
 
-void __cpuinit map_cpu_to_node(int cpu, int nid)
+void map_cpu_to_node(int cpu, int nid)
 {
     int oldnid;
     if (nid < 0) { /* just initialize by zero */
@@ -51,7 +51,7 @@ void __cpuinit map_cpu_to_node(int cpu, int nid)
     return;
 }
 
-void __cpuinit unmap_cpu_from_node(int cpu, int nid)
+void unmap_cpu_from_node(int cpu, int nid)
 {
     WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
     WARN_ON(cpu_to_node_map[cpu] != nid);
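In the msi_ia64.c hunks above, picking a target CPU and then validating it with cpu_online() is collapsed into a single cpumask_first_and() against cpu_online_mask, which can never hand back an offline CPU. A small sketch of the difference, assuming a caller-supplied affinity mask:

#include <linux/cpumask.h>

/* Old: first CPU of the mask, then validate it separately. */
static int pick_cpu_old(const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return -1;
	return cpu;
}

/* New: intersect with the online mask up front. */
static int pick_cpu_new(const struct cpumask *mask)
{
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	/* an empty intersection yields cpu >= nr_cpu_ids */
	return cpu < nr_cpu_ids ? cpu : -1;
}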
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 77597e5ea60..c39c3cd3ac3 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/efi.h>
@@ -41,7 +42,7 @@ MODULE_LICENSE("GPL");
 
 #define PALINFO_VERSION "0.5"
 
-typedef int (*palinfo_func_t)(char*);
+typedef int (*palinfo_func_t)(struct seq_file *);
 
 typedef struct {
     const char *name;       /* name of the proc entry */
@@ -54,7 +55,7 @@ typedef struct {
 /*
 *  A bunch of string array to get pretty printing
 */
-static char *cache_types[] = {
+static const char *cache_types[] = {
     "",         /* not used */
     "Instruction",
     "Data",
@@ -122,19 +123,16 @@ static const char *mem_attrib[]={
 *  - a pointer to the end of the buffer
 *
 */
-static char *
-bitvector_process(char *p, u64 vector)
+static void bitvector_process(struct seq_file *m, u64 vector)
 {
     int i,j;
-    const char *units[]={ "", "K", "M", "G", "T" };
+    static const char *units[]={ "", "K", "M", "G", "T" };
 
     for (i=0, j=0; i < 64; i++ , j=i/10) {
-        if (vector & 0x1) {
-            p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
-        }
+        if (vector & 0x1)
+            seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
         vector >>= 1;
     }
-    return p;
 }
 
 /*
@@ -149,8 +147,7 @@ bitvector_process(char *p, u64 vector)
 *  - a pointer to the end of the buffer
 *
 */
-static char *
-bitregister_process(char *p, u64 *reg_info, int max)
+static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
 {
     int i, begin, skip = 0;
     u64 value = reg_info[0];
@@ -163,9 +160,9 @@ bitregister_process(char *p, u64 *reg_info, int max)
 
         if ((value & 0x1) == 0 && skip == 0) {
             if (begin <= i - 2)
-                p += sprintf(p, "%d-%d ", begin, i-1);
+                seq_printf(m, "%d-%d ", begin, i-1);
             else
-                p += sprintf(p, "%d ", i-1);
+                seq_printf(m, "%d ", i-1);
             skip = 1;
             begin = -1;
         } else if ((value & 0x1) && skip == 1) {
@@ -176,19 +173,15 @@ bitregister_process(char *p, u64 *reg_info, int max)
     }
     if (begin > -1) {
         if (begin < 127)
-            p += sprintf(p, "%d-127", begin);
+            seq_printf(m, "%d-127", begin);
         else
-            p += sprintf(p, "127");
+            seq_puts(m, "127");
     }
-
-    return p;
 }
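All of the palinfo handlers below are converted from sprintf-into-a-page to the seq_file interface: each handler receives a struct seq_file and emits with seq_printf()/seq_puts(), letting the core handle buffering and overflow. The canonical single_open() wiring looks roughly like this (hypothetical names; palinfo's own /proc plumbing is outside these hunks):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *m, void *v)
{
	/* emit freely; seq_file grows its buffer as needed */
	seq_printf(m, "value : %d\n", 42);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, NULL);
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_init(void)
{
	proc_create("my_entry", 0444, NULL, &my_fops);
	return 0;
}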
"Yes" : "No"); } else { - p += sprintf(p,"Power level %d: not implemented\n",i); + seq_printf(m,"Power level %d: not implemented\n", i); } } - return p - page; + return 0; } -static int -cache_info(char *page) +static int cache_info(struct seq_file *m) { - char *p = page; unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; int j, k; @@ -228,73 +220,74 @@ cache_info(char *page) return 0; } - p += sprintf(p, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches); + seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n", + levels, unique_caches); for (i=0; i < levels; i++) { - for (j=2; j >0 ; j--) { - /* even without unification some level may not be present */ - if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) { + if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) continue; - } - p += sprintf(p, - "%s Cache level %lu:\n" - "\tSize : %u bytes\n" - "\tAttributes : ", - cache_types[j+cci.pcci_unified], i+1, - cci.pcci_cache_size); - - if (cci.pcci_unified) p += sprintf(p, "Unified "); - - p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]); - - p += sprintf(p, - "\tAssociativity : %d\n" - "\tLine size : %d bytes\n" - "\tStride : %d bytes\n", - cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride); + + seq_printf(m, + "%s Cache level %lu:\n" + "\tSize : %u bytes\n" + "\tAttributes : ", + cache_types[j+cci.pcci_unified], i+1, + cci.pcci_cache_size); + + if (cci.pcci_unified) + seq_puts(m, "Unified "); + + seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]); + + seq_printf(m, + "\tAssociativity : %d\n" + "\tLine size : %d bytes\n" + "\tStride : %d bytes\n", + cci.pcci_assoc, + 1<<cci.pcci_line_size, + 1<<cci.pcci_stride); if (j == 1) - p += sprintf(p, "\tStore latency : N/A\n"); + seq_puts(m, "\tStore latency : N/A\n"); else - p += sprintf(p, "\tStore latency : %d cycle(s)\n", - cci.pcci_st_latency); + seq_printf(m, "\tStore latency : %d cycle(s)\n", + cci.pcci_st_latency); - p += sprintf(p, - "\tLoad latency : %d cycle(s)\n" - "\tStore hints : ", cci.pcci_ld_latency); + seq_printf(m, + "\tLoad latency : %d cycle(s)\n" + "\tStore hints : ", cci.pcci_ld_latency); for(k=0; k < 8; k++ ) { if ( cci.pcci_st_hints & 0x1) - p += sprintf(p, "[%s]", cache_st_hints[k]); + seq_printf(m, "[%s]", cache_st_hints[k]); cci.pcci_st_hints >>=1; } - p += sprintf(p, "\n\tLoad hints : "); + seq_puts(m, "\n\tLoad hints : "); for(k=0; k < 8; k++ ) { if (cci.pcci_ld_hints & 0x1) - p += sprintf(p, "[%s]", cache_ld_hints[k]); + seq_printf(m, "[%s]", cache_ld_hints[k]); cci.pcci_ld_hints >>=1; } - p += sprintf(p, - "\n\tAlias boundary : %d byte(s)\n" - "\tTag LSB : %d\n" - "\tTag MSB : %d\n", - 1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb, - cci.pcci_tag_msb); + seq_printf(m, + "\n\tAlias boundary : %d byte(s)\n" + "\tTag LSB : %d\n" + "\tTag MSB : %d\n", + 1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb, + cci.pcci_tag_msb); /* when unified, data(j=2) is enough */ - if (cci.pcci_unified) break; + if (cci.pcci_unified) + break; } } - return p - page; + return 0; } -static int -vm_info(char *page) +static int vm_info(struct seq_file *m) { - char *p = page; u64 tr_pages =0, vw_pages=0, tc_pages; u64 attrib; pal_vm_info_1_u_t vm_info_1; @@ -309,7 +302,7 @@ vm_info(char *page) printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); } else { - p += sprintf(p, + seq_printf(m, "Physical Address Space : %d bits\n" "Virtual Address Space : %d bits\n" "Protection Key Registers(PKR) : %d\n" @@ -324,49 +317,49 @@ vm_info(char *page) vm_info_1.pal_vm_info_1_s.hash_tag_id, 
vm_info_2.pal_vm_info_2_s.rid_size); if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES) - p += sprintf(p, "unlimited\n"); + seq_puts(m, "unlimited\n"); else - p += sprintf(p, "%d\n", + seq_printf(m, "%d\n", vm_info_2.pal_vm_info_2_s.max_purges ? vm_info_2.pal_vm_info_2_s.max_purges : 1); } if (ia64_pal_mem_attrib(&attrib) == 0) { - p += sprintf(p, "Supported memory attributes : "); + seq_puts(m, "Supported memory attributes : "); sep = ""; for (i = 0; i < 8; i++) { if (attrib & (1 << i)) { - p += sprintf(p, "%s%s", sep, mem_attrib[i]); + seq_printf(m, "%s%s", sep, mem_attrib[i]); sep = ", "; } } - p += sprintf(p, "\n"); + seq_putc(m, '\n'); } if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); } else { - p += sprintf(p, - "\nTLB walker : %simplemented\n" - "Number of DTR : %d\n" - "Number of ITR : %d\n" - "TLB insertable page sizes : ", - vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", - vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, - vm_info_1.pal_vm_info_1_s.max_itr_entry+1); + seq_printf(m, + "\nTLB walker : %simplemented\n" + "Number of DTR : %d\n" + "Number of ITR : %d\n" + "TLB insertable page sizes : ", + vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", + vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, + vm_info_1.pal_vm_info_1_s.max_itr_entry+1); + bitvector_process(m, tr_pages); - p = bitvector_process(p, tr_pages); + seq_puts(m, "\nTLB purgeable page sizes : "); - p += sprintf(p, "\nTLB purgeable page sizes : "); - - p = bitvector_process(p, vw_pages); + bitvector_process(m, vw_pages); } - if ((status=ia64_get_ptce(&ptce)) != 0) { + + if ((status = ia64_get_ptce(&ptce)) != 0) { printk(KERN_ERR "ia64_get_ptce=%ld\n", status); } else { - p += sprintf(p, + seq_printf(m, "\nPurge base address : 0x%016lx\n" "Purge outer loop count : %d\n" "Purge inner loop count : %d\n" @@ -375,7 +368,7 @@ vm_info(char *page) ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); - p += sprintf(p, + seq_printf(m, "TC Levels : %d\n" "Unique TC(s) : %d\n", vm_info_1.pal_vm_info_1_s.num_tc_levels, @@ -385,13 +378,11 @@ vm_info(char *page) for (j=2; j>0 ; j--) { tc_pages = 0; /* just in case */ - /* even without unification, some levels may not be present */ - if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { + if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) continue; - } - p += sprintf(p, + seq_printf(m, "\n%s Translation Cache Level %d:\n" "\tHash sets : %d\n" "\tAssociativity : %d\n" @@ -403,15 +394,15 @@ vm_info(char *page) tc_info.tc_num_entries); if (tc_info.tc_pf) - p += sprintf(p, "PreferredPageSizeOptimized "); + seq_puts(m, "PreferredPageSizeOptimized "); if (tc_info.tc_unified) - p += sprintf(p, "Unified "); + seq_puts(m, "Unified "); if (tc_info.tc_reduce_tr) - p += sprintf(p, "TCReduction"); + seq_puts(m, "TCReduction"); - p += sprintf(p, "\n\tSupported page sizes: "); + seq_puts(m, "\n\tSupported page sizes: "); - p = bitvector_process(p, tc_pages); + bitvector_process(m, tc_pages); /* when unified date (j=2) is enough */ if (tc_info.tc_unified) @@ -419,16 +410,14 @@ vm_info(char *page) } } } - p += sprintf(p, "\n"); - return p - page; + seq_putc(m, '\n'); + return 0; } -static int -register_info(char *page) +static int register_info(struct seq_file *m) { - char *p = page; u64 reg_info[2]; u64 info; unsigned long phys_stacked; @@ -442,35 +431,31 @@ register_info(char *page) }; for(info=0; info < 4; info++) { - - if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) return 0; - 
- p += sprintf(p, "%-32s : ", info_type[info]); - - p = bitregister_process(p, reg_info, 128); - - p += sprintf(p, "\n"); + if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) + return 0; + seq_printf(m, "%-32s : ", info_type[info]); + bitregister_process(m, reg_info, 128); + seq_putc(m, '\n'); } - if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) { + if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) + seq_printf(m, + "RSE stacked physical registers : %ld\n" + "RSE load/store hints : %ld (%s)\n", + phys_stacked, hints.ph_data, + hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); - p += sprintf(p, - "RSE stacked physical registers : %ld\n" - "RSE load/store hints : %ld (%s)\n", - phys_stacked, hints.ph_data, - hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); - } if (ia64_pal_debug_info(&iregs, &dregs)) return 0; - p += sprintf(p, - "Instruction debug register pairs : %ld\n" - "Data debug register pairs : %ld\n", iregs, dregs); + seq_printf(m, + "Instruction debug register pairs : %ld\n" + "Data debug register pairs : %ld\n", iregs, dregs); - return p - page; + return 0; } -static char *proc_features_0[]={ /* Feature set 0 */ +static const char *const proc_features_0[]={ /* Feature set 0 */ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, @@ -502,7 +487,7 @@ static char *proc_features_0[]={ /* Feature set 0 */ "Enable BERR promotion" }; -static char *proc_features_16[]={ /* Feature set 16 */ +static const char *const proc_features_16[]={ /* Feature set 16 */ "Disable ETM", "Enable ETM", "Enable MCA on half-way timer", @@ -522,7 +507,7 @@ static char *proc_features_16[]={ /* Feature set 16 */ NULL, NULL, NULL, NULL, NULL }; -static char **proc_features[]={ +static const char *const *const proc_features[]={ proc_features_0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -530,11 +515,10 @@ static char **proc_features[]={ NULL, NULL, NULL, NULL, }; -static char * feature_set_info(char *page, u64 avail, u64 status, u64 control, - unsigned long set) +static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control, + unsigned long set) { - char *p = page; - char **vf, **v; + const char *const *vf, *const *v; int i; vf = v = proc_features[set]; @@ -547,13 +531,13 @@ static char * feature_set_info(char *page, u64 avail, u64 status, u64 control, if (vf) v = vf + i; if ( v && *v ) { - p += sprintf(p, "%-40s : %s %s\n", *v, + seq_printf(m, "%-40s : %s %s\n", *v, avail & 0x1 ? (status & 0x1 ? - "On " : "Off"): "", + "On " : "Off"): "", avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): ""); } else { - p += sprintf(p, "Feature set %2ld bit %2d\t\t\t" + seq_printf(m, "Feature set %2ld bit %2d\t\t\t" " : %s %s\n", set, i, avail & 0x1 ? (status & 0x1 ? 
@@ -562,36 +546,32 @@ static char * feature_set_info(char *page, u64 avail, u64 status, u64 control, "Ctrl" : "NoCtrl"): ""); } } - return p; } -static int -processor_info(char *page) +static int processor_info(struct seq_file *m) { - char *p = page; u64 avail=1, status=1, control=1, feature_set=0; s64 ret; do { ret = ia64_pal_proc_get_features(&avail, &status, &control, feature_set); - if (ret < 0) { - return p - page; - } + if (ret < 0) + return 0; + if (ret == 1) { feature_set++; continue; } - p = feature_set_info(p, avail, status, control, feature_set); - + feature_set_info(m, avail, status, control, feature_set); feature_set++; } while(1); - return p - page; + return 0; } -static const char *bus_features[]={ +static const char *const bus_features[]={ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, @@ -617,125 +597,118 @@ static const char *bus_features[]={ }; -static int -bus_info(char *page) +static int bus_info(struct seq_file *m) { - char *p = page; - const char **v = bus_features; + const char *const *v = bus_features; pal_bus_features_u_t av, st, ct; u64 avail, status, control; int i; s64 ret; - if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0; + if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) + return 0; avail = av.pal_bus_features_val; status = st.pal_bus_features_val; control = ct.pal_bus_features_val; for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) { - if ( ! *v ) continue; - p += sprintf(p, "%-48s : %s%s %s\n", *v, - avail & 0x1 ? "" : "NotImpl", - avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "", - avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): ""); + if ( ! *v ) + continue; + seq_printf(m, "%-48s : %s%s %s\n", *v, + avail & 0x1 ? "" : "NotImpl", + avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "", + avail & 0x1 ? (control & 0x1 ? 
"Ctrl" : "NoCtrl"): ""); } - return p - page; + return 0; } -static int -version_info(char *page) +static int version_info(struct seq_file *m) { pal_version_u_t min_ver, cur_ver; - char *p = page; if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; - p += sprintf(p, - "PAL_vendor : 0x%02x (min=0x%02x)\n" - "PAL_A : %02x.%02x (min=%02x.%02x)\n" - "PAL_B : %02x.%02x (min=%02x.%02x)\n", - cur_ver.pal_version_s.pv_pal_vendor, - min_ver.pal_version_s.pv_pal_vendor, - cur_ver.pal_version_s.pv_pal_a_model, - cur_ver.pal_version_s.pv_pal_a_rev, - min_ver.pal_version_s.pv_pal_a_model, - min_ver.pal_version_s.pv_pal_a_rev, - cur_ver.pal_version_s.pv_pal_b_model, - cur_ver.pal_version_s.pv_pal_b_rev, - min_ver.pal_version_s.pv_pal_b_model, - min_ver.pal_version_s.pv_pal_b_rev); - return p - page; + seq_printf(m, + "PAL_vendor : 0x%02x (min=0x%02x)\n" + "PAL_A : %02x.%02x (min=%02x.%02x)\n" + "PAL_B : %02x.%02x (min=%02x.%02x)\n", + cur_ver.pal_version_s.pv_pal_vendor, + min_ver.pal_version_s.pv_pal_vendor, + cur_ver.pal_version_s.pv_pal_a_model, + cur_ver.pal_version_s.pv_pal_a_rev, + min_ver.pal_version_s.pv_pal_a_model, + min_ver.pal_version_s.pv_pal_a_rev, + cur_ver.pal_version_s.pv_pal_b_model, + cur_ver.pal_version_s.pv_pal_b_rev, + min_ver.pal_version_s.pv_pal_b_model, + min_ver.pal_version_s.pv_pal_b_rev); + return 0; } -static int -perfmon_info(char *page) +static int perfmon_info(struct seq_file *m) { - char *p = page; u64 pm_buffer[16]; pal_perf_mon_info_u_t pm_info; - if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0; - - p += sprintf(p, - "PMC/PMD pairs : %d\n" - "Counter width : %d bits\n" - "Cycle event number : %d\n" - "Retired event number : %d\n" - "Implemented PMC : ", - pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width, - pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired); + if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) + return 0; - p = bitregister_process(p, pm_buffer, 256); - p += sprintf(p, "\nImplemented PMD : "); - p = bitregister_process(p, pm_buffer+4, 256); - p += sprintf(p, "\nCycles count capable : "); - p = bitregister_process(p, pm_buffer+8, 256); - p += sprintf(p, "\nRetired bundles count capable : "); + seq_printf(m, + "PMC/PMD pairs : %d\n" + "Counter width : %d bits\n" + "Cycle event number : %d\n" + "Retired event number : %d\n" + "Implemented PMC : ", + pm_info.pal_perf_mon_info_s.generic, + pm_info.pal_perf_mon_info_s.width, + pm_info.pal_perf_mon_info_s.cycles, + pm_info.pal_perf_mon_info_s.retired); + + bitregister_process(m, pm_buffer, 256); + seq_puts(m, "\nImplemented PMD : "); + bitregister_process(m, pm_buffer+4, 256); + seq_puts(m, "\nCycles count capable : "); + bitregister_process(m, pm_buffer+8, 256); + seq_puts(m, "\nRetired bundles count capable : "); #ifdef CONFIG_ITANIUM /* * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES * which is wrong, both PMC4 and PMD5 support it. 
*/ - if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30; + if (pm_buffer[12] == 0x10) + pm_buffer[12]=0x30; #endif - p = bitregister_process(p, pm_buffer+12, 256); - - p += sprintf(p, "\n"); - - return p - page; + bitregister_process(m, pm_buffer+12, 256); + seq_putc(m, '\n'); + return 0; } -static int -frequency_info(char *page) +static int frequency_info(struct seq_file *m) { - char *p = page; struct pal_freq_ratio proc, itc, bus; unsigned long base; if (ia64_pal_freq_base(&base) == -1) - p += sprintf(p, "Output clock : not implemented\n"); + seq_puts(m, "Output clock : not implemented\n"); else - p += sprintf(p, "Output clock : %ld ticks/s\n", base); + seq_printf(m, "Output clock : %ld ticks/s\n", base); if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; - p += sprintf(p, + seq_printf(m, "Processor/Clock ratio : %d/%d\n" "Bus/Clock ratio : %d/%d\n" "ITC/Clock ratio : %d/%d\n", proc.num, proc.den, bus.num, bus.den, itc.num, itc.den); - - return p - page; + return 0; } -static int -tr_info(char *page) +static int tr_info(struct seq_file *m) { - char *p = page; long status; pal_tr_valid_u_t tr_valid; u64 tr_buffer[4]; @@ -794,39 +767,40 @@ tr_info(char *page) ifa_reg = (struct ifa_reg *)&tr_buffer[2]; - if (ifa_reg->valid == 0) continue; + if (ifa_reg->valid == 0) + continue; gr_reg = (struct gr_reg *)tr_buffer; itir_reg = (struct itir_reg *)&tr_buffer[1]; rid_reg = (struct rid_reg *)&tr_buffer[3]; pgm = -1 << (itir_reg->ps - 12); - p += sprintf(p, - "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" - "\tppn : 0x%lx\n" - "\tvpn : 0x%lx\n" - "\tps : ", - "ID"[i], j, - tr_valid.pal_tr_valid_s.access_rights_valid, - tr_valid.pal_tr_valid_s.priv_level_valid, - tr_valid.pal_tr_valid_s.dirty_bit_valid, - tr_valid.pal_tr_valid_s.mem_attr_valid, - (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); - - p = bitvector_process(p, 1<< itir_reg->ps); - - p += sprintf(p, - "\n\tpl : %d\n" - "\tar : %d\n" - "\trid : %x\n" - "\tp : %d\n" - "\tma : %d\n" - "\td : %d\n", - gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, - gr_reg->d); + seq_printf(m, + "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" + "\tppn : 0x%lx\n" + "\tvpn : 0x%lx\n" + "\tps : ", + "ID"[i], j, + tr_valid.pal_tr_valid_s.access_rights_valid, + tr_valid.pal_tr_valid_s.priv_level_valid, + tr_valid.pal_tr_valid_s.dirty_bit_valid, + tr_valid.pal_tr_valid_s.mem_attr_valid, + (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); + + bitvector_process(m, 1<< itir_reg->ps); + + seq_printf(m, + "\n\tpl : %d\n" + "\tar : %d\n" + "\trid : %x\n" + "\tp : %d\n" + "\tma : %d\n" + "\td : %d\n", + gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, + gr_reg->d); } } - return p - page; + return 0; } @@ -834,7 +808,7 @@ tr_info(char *page) /* * List {name,function} pairs for every entry in /proc/palinfo/cpu* */ -static palinfo_entry_t palinfo_entries[]={ +static const palinfo_entry_t palinfo_entries[]={ { "version_info", version_info, }, { "vm_info", vm_info, }, { "cache_info", cache_info, }, @@ -849,17 +823,6 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. 
The procfs code - * does not do recursion of deletion - * - * Notes: - * - +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -887,7 +850,7 @@ typedef union { */ typedef struct { palinfo_func_t func; /* pointer to function to call */ - char *page; /* buffer to store results */ + struct seq_file *m; /* buffer to store results */ int ret; /* return value from call */ } palinfo_smp_data_t; @@ -900,7 +863,7 @@ static void palinfo_smp_call(void *info) { palinfo_smp_data_t *data = (palinfo_smp_data_t *)info; - data->ret = (*data->func)(data->page); + data->ret = (*data->func)(data->m); } /* @@ -910,13 +873,13 @@ palinfo_smp_call(void *info) * otherwise how many bytes in the "page" buffer were written */ static -int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) +int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f) { palinfo_smp_data_t ptr; int ret; ptr.func = palinfo_entries[f->func_id].proc_read; - ptr.page = page; + ptr.m = m; ptr.ret = 0; /* just in case */ @@ -930,7 +893,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) } #else /* ! CONFIG_SMP */ static -int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) +int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f) { printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n"); return 0; @@ -940,94 +903,66 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) /* * Entry point routine: all calls go through this function */ -static int -palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data) +static int proc_palinfo_show(struct seq_file *m, void *v) { - int len=0; - pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data; + pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private; /* * in SMP mode, we may need to call another CPU to get correct * information. PAL, by definition, is processor specific */ if (f->req_cpu == get_cpu()) - len = (*palinfo_entries[f->func_id].proc_read)(page); + (*palinfo_entries[f->func_id].proc_read)(m); else - len = palinfo_handle_smp(f, page); + palinfo_handle_smp(m, f); put_cpu(); + return 0; +} - if (len <= off+count) *eof = 1; - - *start = page + off; - len -= off; - - if (len>count) len = count; - if (len<0) len = 0; - - return len; +static int proc_palinfo_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_palinfo_show, PDE_DATA(inode)); } -static void __cpuinit +static const struct file_operations proc_palinfo_fops = { + .open = proc_palinfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void create_palinfo_proc_entries(unsigned int cpu) { -# define CPUSTR "cpu%d" - pal_func_cpu_u_t f; - struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[sizeof(CPUSTR)]; - - - /* - * we keep track of created entries in a depth-first order for - * cleanup purposes. Each entry is stored into palinfo_proc_entries - */ - sprintf(cpustr,CPUSTR, cpu); + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); + if (!cpu_dir) + return; f.req_cpu = cpu; - /* - * Compute the location to store per cpu entries - * We dont store the top level entry in this list, but - * remove it finally after removing all cpu entries. 
- */ - pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; - *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - *pdir = create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); - pdir++; + proc_create_data(palinfo_entries[j].name, 0, cpu_dir, + &proc_palinfo_fops, (void *)f.value); } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - int j; - struct proc_dir_entry *cpu_dir, **pdir; - - pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; - cpu_dir = *pdir; - *pdir++=NULL; - for (j=0; j < (NR_PALINFO_ENTRIES); j++) { - if ((*pdir)) { - remove_proc_entry ((*pdir)->name, cpu_dir); - *pdir ++= NULL; - } - } - - if (cpu_dir) { - remove_proc_entry(cpu_dir->name, palinfo_dir); - } + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", hcpu); + remove_proc_subtree(cpustr, palinfo_dir); } -static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, +static int palinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int hotcpu = (unsigned long)hcpu; @@ -1058,6 +993,10 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); + if (!palinfo_dir) + return -ENOMEM; + + cpu_notifier_register_begin(); /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1065,7 +1004,9 @@ palinfo_init(void) } /* Register for future delivery via notify registration */ - register_hotcpu_notifier(&palinfo_cpu_notifier); + __register_hotcpu_notifier(&palinfo_cpu_notifier); + + cpu_notifier_register_done(); return 0; } @@ -1073,22 +1014,8 @@ palinfo_init(void) static void __exit palinfo_exit(void) { - int i = 0; - - /* remove all nodes: depth first pass. Could optimize this */ - for_each_online_cpu(i) { - remove_palinfo_proc_entries(i); - } - - /* - * Remove the top level entry finally - */ - remove_proc_entry(palinfo_dir->name, NULL); - - /* - * Unregister from cpu notifier callbacks - */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); + remove_proc_subtree("pal", NULL); } module_init(palinfo_init); diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 64d6d810c64..1ad7512b5f6 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h @@ -22,9 +22,6 @@ #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK #include <asm/native/pvchk_inst.h> -#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) -#include <asm/xen/inst.h> -#include <asm/xen/minstate.h> #else #include <asm/native/inst.h> #endif diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h index 0684aa6c650..67cffc3643a 100644 --- a/arch/ia64/kernel/paravirt_patchlist.h +++ b/arch/ia64/kernel/paravirt_patchlist.h @@ -20,9 +20,5 @@ * */ -#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) -#include <asm/xen/patchlist.h> -#else #include <asm/native/patchlist.h> -#endif diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index 1ddcfe5ef35..992c1098c52 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -33,15 +33,6 @@ int force_iommu __read_mostly; int iommu_pass_through; -/* Dummy device used for NULL arguments (normally ISA). Better would - be probably a smaller DMA mask, but this is bug-to-bug compatible - to i386. 
*/ -struct device fallback_dev = { - .init_name = "fallback device", - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &fallback_dev.coherent_dma_mask, -}; - extern struct dma_map_ops intel_dma_ops; static int __init pci_iommu_init(void) diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 2eda28414ab..5845ffea67c 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -42,6 +42,7 @@ #include <linux/completion.h> #include <linux/tracehook.h> #include <linux/slab.h> +#include <linux/cpu.h> #include <asm/errno.h> #include <asm/intrinsics.h> @@ -520,7 +521,7 @@ static pmu_config_t *pmu_conf; pfm_sysctl_t pfm_sysctl; EXPORT_SYMBOL(pfm_sysctl); -static ctl_table pfm_ctl_table[]={ +static struct ctl_table pfm_ctl_table[] = { { .procname = "debug", .data = &pfm_sysctl.debug, @@ -551,7 +552,7 @@ static ctl_table pfm_ctl_table[]={ }, {} }; -static ctl_table pfm_sysctl_dir[] = { +static struct ctl_table pfm_sysctl_dir[] = { { .procname = "perfmon", .mode = 0555, @@ -559,7 +560,7 @@ static ctl_table pfm_sysctl_dir[] = { }, {} }; -static ctl_table pfm_sysctl_root[] = { +static struct ctl_table pfm_sysctl_root[] = { { .procname = "kernel", .mode = 0555, @@ -1322,8 +1323,6 @@ out: } EXPORT_SYMBOL(pfm_unregister_buffer_fmt); -extern void update_pal_halt_status(int); - static int pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) { @@ -1371,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) cpu)); /* - * disable default_idle() to go to PAL_HALT + * Force idle() into poll mode */ - update_pal_halt_status(0); + cpu_idle_poll_ctrl(true); UNLOCK_PFS(flags); @@ -1430,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) is_syswide, cpu)); - /* - * if possible, enable default_idle() to go into PAL_HALT - */ - if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) - update_pal_halt_status(1); + /* Undo forced polling. 
Last session reenables pal_halt */ + cpu_idle_poll_ctrl(false); UNLOCK_PFS(flags); @@ -2170,12 +2166,6 @@ static const struct file_operations pfm_file_ops = { .flush = pfm_flush }; -static int -pfmfs_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]", @@ -2183,7 +2173,7 @@ static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) } static const struct dentry_operations pfmfs_dentry_operations = { - .d_delete = pfmfs_delete_dentry, + .d_delete = always_delete_dentry, .d_dname = pfmfs_dname, }; @@ -5651,24 +5641,8 @@ pfm_proc_show_header(struct seq_file *m) list_for_each(pos, &pfm_buffer_fmt_list) { entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); - seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n", - entry->fmt_uuid[0], - entry->fmt_uuid[1], - entry->fmt_uuid[2], - entry->fmt_uuid[3], - entry->fmt_uuid[4], - entry->fmt_uuid[5], - entry->fmt_uuid[6], - entry->fmt_uuid[7], - entry->fmt_uuid[8], - entry->fmt_uuid[9], - entry->fmt_uuid[10], - entry->fmt_uuid[11], - entry->fmt_uuid[12], - entry->fmt_uuid[13], - entry->fmt_uuid[14], - entry->fmt_uuid[15], - entry->fmt_name); + seq_printf(m, "format : %16phD %s\n", + entry->fmt_uuid, entry->fmt_name); } spin_unlock(&pfm_buffer_fmt_lock); @@ -6413,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) static struct irqaction perfmon_irqaction = { .handler = pfm_interrupt_handler, - .flags = IRQF_DISABLED, .name = "perfmon" }; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index e34f565f595..55d4ba47a90 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -96,21 +96,13 @@ show_stack (struct task_struct *task, unsigned long *sp) } void -dump_stack (void) -{ - show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - -void show_regs (struct pt_regs *regs) { unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); - printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), - smp_processor_id(), current->comm); + printk("\n"); + show_regs_print_info(KERN_DEFAULT); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), init_utsname()->release); @@ -209,41 +201,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) local_irq_disable(); /* force interrupt disable */ } -static int pal_halt = 1; -static int can_do_pal_halt = 1; - static int __init nohalt_setup(char * str) { - pal_halt = can_do_pal_halt = 0; + cpu_idle_poll_ctrl(true); return 1; } __setup("nohalt", nohalt_setup); -void -update_pal_halt_status(int status) -{ - can_do_pal_halt = pal_halt && status; -} - -/* - * We use this if we don't have any better idle routine.. - */ -void -default_idle (void) -{ - local_irq_enable(); - while (!need_resched()) { - if (can_do_pal_halt) { - local_irq_disable(); - if (!need_resched()) { - safe_halt(); - } - local_irq_enable(); - } else - cpu_relax(); - } -} - #ifdef CONFIG_HOTPLUG_CPU /* We don't actually take CPU down, just spin without interrupts. 
*/ static inline void play_dead(void) @@ -270,50 +234,29 @@ static inline void play_dead(void) } #endif /* CONFIG_HOTPLUG_CPU */ -void __attribute__((noreturn)) -cpu_idle (void) +void arch_cpu_idle_dead(void) +{ + play_dead(); +} + +void arch_cpu_idle(void) { void (*mark_idle)(int) = ia64_mark_idle; - int cpu = smp_processor_id(); - - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - if (can_do_pal_halt) { - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we - * test NEED_RESCHED: - */ - smp_mb(); - } else { - current_thread_info()->status |= TS_POLLING; - } - if (!need_resched()) { - void (*idle)(void); #ifdef CONFIG_SMP - min_xtp(); + min_xtp(); #endif - rmb(); - if (mark_idle) - (*mark_idle)(1); - - if (!idle) - idle = default_idle; - (*idle)(); - if (mark_idle) - (*mark_idle)(0); + rmb(); + if (mark_idle) + (*mark_idle)(1); + + safe_halt(); + + if (mark_idle) + (*mark_idle)(0); #ifdef CONFIG_SMP - normal_xtp(); + normal_xtp(); #endif - } - rcu_idle_exit(); - schedule_preempt_disabled(); - check_pgt_cache(); - if (cpu_is_offline(cpu)) - play_dead(); - } } void diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index aa527d7e91f..ee9719eebb1 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -40,6 +40,7 @@ #include <linux/cpu.h> #include <linux/types.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/timer.h> @@ -53,7 +54,7 @@ MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>"); MODULE_DESCRIPTION("/proc interface to IA-64 SAL features"); MODULE_LICENSE("GPL"); -static int salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data); +static const struct file_operations proc_salinfo_fops; typedef struct { const char *name; /* name of the proc entry */ @@ -65,7 +66,7 @@ typedef struct { * List {name,feature} pairs for every entry in /proc/sal/<feature> * that this module exports */ -static salinfo_entry_t salinfo_entries[]={ +static const salinfo_entry_t salinfo_entries[]={ { "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, }, { "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, }, { "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, }, @@ -301,9 +302,7 @@ salinfo_event_open(struct inode *inode, struct file *file) static ssize_t salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - struct inode *inode = file_inode(file); - struct proc_dir_entry *entry = PDE(inode); - struct salinfo_data *data = entry->data; + struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; int i, n, cpu = -1; @@ -360,8 +359,7 @@ static const struct file_operations salinfo_event_fops = { static int salinfo_log_open(struct inode *inode, struct file *file) { - struct proc_dir_entry *entry = PDE(inode); - struct salinfo_data *data = entry->data; + struct salinfo_data *data = PDE_DATA(inode); if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -386,8 +384,7 @@ salinfo_log_open(struct inode *inode, struct file *file) static int salinfo_log_release(struct inode *inode, struct file *file) { - struct proc_dir_entry *entry = PDE(inode); - struct salinfo_data *data = entry->data; + struct salinfo_data *data = PDE_DATA(inode); if (data->state == STATE_NO_DATA) { vfree(data->log_buffer); @@ -463,9 +460,7 @@ retry: static ssize_t salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - struct inode 
*inode = file_inode(file); - struct proc_dir_entry *entry = PDE(inode); - struct salinfo_data *data = entry->data; + struct salinfo_data *data = PDE_DATA(file_inode(file)); u8 *buf; u64 bufsize; @@ -524,9 +519,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu) static ssize_t salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - struct inode *inode = file_inode(file); - struct proc_dir_entry *entry = PDE(inode); - struct salinfo_data *data = entry->data; + struct salinfo_data *data = PDE_DATA(file_inode(file)); char cmd[32]; size_t size; u32 offset; @@ -575,7 +568,7 @@ static const struct file_operations salinfo_data_fops = { .llseek = default_llseek, }; -static int __cpuinit +static int salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; @@ -616,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu return NOTIFY_OK; } -static struct notifier_block salinfo_cpu_notifier __cpuinitdata = +static struct notifier_block salinfo_cpu_notifier = { .notifier_call = salinfo_cpu_callback, .priority = 0, @@ -637,10 +630,13 @@ salinfo_init(void) for (i=0; i < NR_SALINFO_ENTRIES; i++) { /* pass the feature bit in question as misc data */ - *sdir++ = create_proc_read_entry (salinfo_entries[i].name, 0, salinfo_dir, - salinfo_read, (void *)salinfo_entries[i].feature); + *sdir++ = proc_create_data(salinfo_entries[i].name, 0, salinfo_dir, + &proc_salinfo_fops, + (void *)salinfo_entries[i].feature); } + cpu_notifier_register_begin(); + for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; @@ -675,7 +671,9 @@ salinfo_init(void) salinfo_timer.function = &salinfo_timeout; add_timer(&salinfo_timer); - register_hotcpu_notifier(&salinfo_cpu_notifier); + __register_hotcpu_notifier(&salinfo_cpu_notifier); + + cpu_notifier_register_done(); return 0; } @@ -684,22 +682,23 @@ salinfo_init(void) * 'data' contains an integer that corresponds to the feature we're * testing */ -static int -salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data) +static int proc_salinfo_show(struct seq_file *m, void *v) { - int len = 0; - - len = sprintf(page, (sal_platform_features & (unsigned long)data) ? "1\n" : "0\n"); - - if (len <= off+count) *eof = 1; - - *start = page + off; - len -= off; - - if (len>count) len = count; - if (len<0) len = 0; + unsigned long data = (unsigned long)m->private; + seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); + return 0; +} - return len; +static int proc_salinfo_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_salinfo_show, PDE_DATA(inode)); } +static const struct file_operations proc_salinfo_fops = { + .open = proc_salinfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + module_init(salinfo_init); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 2029cc0d2fc..d86669bcdfb 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -748,7 +748,7 @@ const struct seq_operations cpuinfo_op = { #define MAX_BRANDS 8 static char brandname[MAX_BRANDS][128]; -static char * __cpuinit +static char * get_model_name(__u8 family, __u8 model) { static int overflow; @@ -778,7 +778,7 @@ get_model_name(__u8 family, __u8 model) return "Unknown"; } -static void __cpuinit +static void identify_cpu (struct cpuinfo_ia64 *c) { union { @@ -850,7 +850,7 @@ identify_cpu (struct cpuinfo_ia64 *c) * 2. 
the minimum of the i-cache stride sizes for "flush_icache_range()". * 3. the minimum of the cache stride sizes for "clflush_cache_range()". */ -static void __cpuinit +static void get_cache_info(void) { unsigned long line_size, max = 1; @@ -915,10 +915,10 @@ get_cache_info(void) * cpu_init() initializes state that is per-CPU. This function acts * as a 'CPU state barrier', nothing should get across. */ -void __cpuinit +void cpu_init (void) { - extern void __cpuinit ia64_mmu_init (void *); + extern void ia64_mmu_init(void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; @@ -1063,6 +1063,8 @@ check_bugs (void) static int __init run_dmi_scan(void) { dmi_scan_machine(); + dmi_memdev_walk(); + dmi_set_dump_stack_arch_desc(); return 0; } core_initcall(run_dmi_scan); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 3637e03d228..33cab9a8adf 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -105,7 +105,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) } int -copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from) +copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 500f1e4d9f9..547a48d78bd 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -351,7 +351,7 @@ static inline void smp_setup_percpu_timer(void) { } -static void __cpuinit +static void smp_callin (void) { int cpuid, phys_id, itc_master; @@ -442,7 +442,7 @@ smp_callin (void) /* * Activate a secondary processor. head.S calls this. */ -int __cpuinit +int start_secondary (void *unused) { /* Early console may use I/O ports */ @@ -455,11 +455,11 @@ start_secondary (void *unused) preempt_disable(); smp_callin(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); return 0; } -static int __cpuinit +static int do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) { int timeout; @@ -728,7 +728,7 @@ static inline void set_cpu_sibling_map(int cpu) } } -int __cpuinit +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int ret; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index fbaac1afb84..71c52bc7c28 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -380,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs) static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_IRQPOLL, + .flags = IRQF_IRQPOLL, .name = "timer" }; diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index dc00b2c1b42..f295f9abba4 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -135,11 +135,11 @@ struct cpu_cache_info { struct kobject kobj; }; -static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata; +static struct cpu_cache_info all_cpu_cache_info[NR_CPUS]; #define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y]) #ifdef CONFIG_SMP -static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu, +static void cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { pal_cache_shared_info_t csi; @@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu, &csi) == PAL_STATUS_SUCCESS); } #else -static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, +static void cache_shared_cpu_map_setup(unsigned int cpu, 
struct cache_info * this_leaf) { cpu_set(cpu, this_leaf->shared_cpu_map); @@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = { .sysfs_ops = &cache_sysfs_ops, }; -static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) +static void cpu_cache_sysfs_exit(unsigned int cpu) { kfree(all_cpu_cache_info[cpu].cache_leaves); all_cpu_cache_info[cpu].cache_leaves = NULL; @@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) return; } -static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu) +static int cpu_cache_sysfs_init(unsigned int cpu) { unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; @@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu) } /* Add cache interface for CPU device */ -static int __cpuinit cache_add_dev(struct device * sys_dev) +static int cache_add_dev(struct device *sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i, j; @@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev) } /* Remove cache interface for CPU device */ -static int __cpuinit cache_remove_dev(struct device * sys_dev) +static int cache_remove_dev(struct device *sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i; @@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev) * When a cpu is hot-plugged, do a check and initiate * cache kobject if necessary */ -static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, +static int cache_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata cache_cpu_notifier = +static struct notifier_block cache_cpu_notifier = { .notifier_call = cache_cpu_callback }; @@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void) { int i; + cpu_notifier_register_begin(); + for_each_online_cpu(i) { struct device *sys_dev = get_cpu_device((unsigned int)i); cache_add_dev(sys_dev); } - register_hotcpu_notifier(&cache_cpu_notifier); + __register_hotcpu_notifier(&cache_cpu_notifier); + + cpu_notifier_register_done(); return 0; } diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index f7f9f9c6caf..d3636e67a98 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -630,7 +630,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", iip, ifa, isr); force_sig(SIGSEGV, current); - break; + return; case 46: printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a96bcf83a73..20e8a9b21d7 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) /* attempt to allocate a granule's worth of cached memory pages */ page = alloc_pages_exact_node(nid, - GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { mutex_unlock(&uc_pool->add_chunk_mutex); diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0ccb28fab27..84f8a52ac5a 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -182,12 +182,6 @@ SECTIONS { __start_gate_section = .; *(.data..gate) __stop_gate_section = .; -#ifdef CONFIG_XEN - . 
= ALIGN(PAGE_SIZE); - __xen_start_gate_section = .; - *(.data..gate.xen) - __xen_stop_gate_section = .; -#endif } /* * make sure the gate page doesn't expose kernel data */
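
The palinfo and salinfo changes above follow one recipe: the old read_proc callbacks, with their page/start/off/count bookkeeping, become a seq_file show function exposed through single_open(), and the per-entry cookie moves into proc_create_data() and comes back out via PDE_DATA(). A minimal sketch of that recipe against the same-era procfs API; the "demo" entry name and the cookie value are illustrative, not taken from the patch:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Emit the whole file in one go; seq_file does the offset/EOF bookkeeping. */
static int demo_show(struct seq_file *m, void *v)
{
	unsigned long cookie = (unsigned long)m->private;

	seq_printf(m, "cookie : %lu\n", cookie);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() recovers the cookie passed to proc_create_data(). */
	return single_open(file, demo_show, PDE_DATA(inode));
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	if (!proc_create_data("demo", 0, NULL, &demo_fops, (void *)42UL))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the show function reads its cookie from m->private: single_open() stores the data pointer there, while the v argument is only seq_file's iterator token.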
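
palinfo_init(), salinfo_init() and cache_sysfs_init() above also switch from plain register_hotcpu_notifier() to __register_hotcpu_notifier() bracketed by cpu_notifier_register_begin()/cpu_notifier_register_done(), so the initial for_each_online_cpu() walk and the registration are atomic with respect to CPU hotplug. The shape of that pattern, sketched with hypothetical demo_* helpers against the same hotplug API:

#include <linux/cpu.h>
#include <linux/notifier.h>

static void demo_cpu_setup(unsigned int cpu)
{
	/* create per-cpu state for @cpu (hypothetical helper) */
}

static int demo_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		demo_cpu_setup(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_notifier = {
	.notifier_call = demo_cpu_callback,
};

static int __init demo_hotplug_init(void)
{
	int cpu;

	/*
	 * Hold the registration lock across the walk and the notifier
	 * registration so no CPU can come online unseen in between.
	 */
	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		demo_cpu_setup(cpu);
	__register_hotcpu_notifier(&demo_cpu_notifier);
	cpu_notifier_register_done();
	return 0;
}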
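
Finally, the process.c, smpboot.c and perfmon.c hunks retire the arch-private idle loop: secondaries enter the generic scheduler loop through cpu_startup_entry(CPUHP_ONLINE), the architecture supplies only arch_cpu_idle() and arch_cpu_idle_dead(), and perfmon's update_pal_halt_status() toggle becomes the refcounted cpu_idle_poll_ctrl(), which keeps idle CPUs polling instead of halting while any user holds a reference. How a subsystem would use that control, as a sketch with hypothetical demo_* names:

#include <linux/cpu.h>

static void demo_session_start(void)
{
	/* Take a poll reference: idle CPUs spin rather than enter PAL_HALT. */
	cpu_idle_poll_ctrl(true);
}

static void demo_session_stop(void)
{
	/* Drop the reference; idle may halt again once the count hits zero. */
	cpu_idle_poll_ctrl(false);
}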
