Diffstat (limited to 'arch/ia64/kernel')
31 files changed, 131 insertions, 313 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 335eb07480f..615ef81def4 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -53,15 +53,10 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>
-#include <asm/xen/hypervisor.h>
-
-#define BAD_MADT_ENTRY(entry, end) (					\
-	(!entry) || (unsigned long)entry + sizeof(*entry) > end ||	\
-	((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX			"ACPI: "
 
-u32 acpi_rsdt_forced;
+int acpi_lapic;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -120,8 +115,6 @@ acpi_get_sysname(void)
 			return "uv";
 		else
 			return "sn2";
-	} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
-		return "xen";
 	}
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -684,6 +677,8 @@ int __init early_acpi_boot_init(void)
 	if (ret < 1)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
+	else
+		acpi_lapic = 1;
 
 #ifdef CONFIG_SMP
 	if (available_cpus == 0) {
@@ -807,14 +802,9 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  *  ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static __cpuinit
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
-	int pxm_id;
-	int nid;
-
-	pxm_id = acpi_get_pxm(handle);
 	/*
 	 * We don't have cpu-only-node hotadd. But if the system equips
 	 * SRAT table, pxm is already found and node is ready.
@@ -822,11 +812,10 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 	 * This code here is for the system which doesn't have full SRAT
 	 * table for possible cpus.
 	 */
-	nid = acpi_map_pxm_to_node(pxm_id);
 	node_cpuid[cpu].phys_id = physid;
-	node_cpuid[cpu].nid = nid;
+	node_cpuid[cpu].nid = acpi_get_node(handle);
 #endif
-	return (0);
+	return 0;
 }
 
 int additional_cpus __initdata = -1;
@@ -882,40 +871,10 @@ __init void prefill_possible_map(void)
 		set_cpu_possible(i, true);
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	struct acpi_madt_local_sapic *lsapic;
 	cpumask_t tmp_map;
-	int cpu, physid;
-
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-		return -EINVAL;
-
-	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
-
-	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER)
-	{
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	physid = ((lsapic->id << 8) | (lsapic->eid));
-
-	kfree(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
+	int cpu;
 
 	cpumask_complement(&tmp_map, cpu_present_mask);
 	cpu = cpumask_first(&tmp_map);
@@ -934,9 +893,9 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	return _acpi_map_lsapic(handle, pcpu);
+	return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
@@ -963,7 +922,7 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
 	union acpi_object *obj;
 	struct acpi_madt_io_sapic *iosapic;
 	unsigned int gsi_base;
-	int pxm, node;
+	int node;
 
 	/* Only care about objects w/ a method that returns the MADT */
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -990,17 +949,9 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
 
 	kfree(buffer.pointer);
 
-	/*
-	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
-	 * us which node to associate this with.
-	 */
-	pxm = acpi_get_pxm(handle);
-	if (pxm < 0)
-		return AE_OK;
-
-	node = pxm_to_node(pxm);
-
-	if (node >= MAX_NUMNODES || !node_online(node) ||
+	/* OK, it's an IOSAPIC MADT entry; associate it with a node */
+	node = acpi_get_node(handle);
+	if (node == NUMA_NO_NODE || !node_online(node) ||
 	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
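The theme of the acpi.c hunks is replacing the open-coded two-step proximity-domain lookup with acpi_get_node(), which folds both steps together and returns NUMA_NO_NODE when no mapping exists. A minimal sketch of the pattern (illustrative only; error handling elided):

    /* Before: translate handle -> _PXM value -> NUMA node id. */
    int pxm = acpi_get_pxm(handle);
    int node = (pxm < 0) ? NUMA_NO_NODE : pxm_to_node(pxm);

    /* After: one helper does the namespace walk and the translation,
     * returning NUMA_NO_NODE when no proximity info is available. */
    node = acpi_get_node(handle);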
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 46c9e300731..60ef83e6db7 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,9 +16,6 @@
 #include <asm/sigcontext.h>
 #include <asm/mca.h>
 
-#include <asm/xen/interface.h>
-#include <asm/xen/hypervisor.h>
-
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
@@ -290,33 +287,4 @@ void foo(void)
 	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
 		offsetof (struct itc_jitter_data_t, itc_lastcycle));
 
-#ifdef CONFIG_XEN
-	BLANK();
-
-	DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
-	DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
-
-#define DEFINE_MAPPED_REG_OFS(sym, field) \
-	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
-
-	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
-	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
-	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
-	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
-	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
-	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
-	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
-	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
-	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
-	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
-	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
-	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
-	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
-	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
-	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
-	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
-	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
-	DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
-	DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
-#endif /* CONFIG_XEN */
 }
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index b942f4032d7..2955f359e2a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -237,7 +237,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_ctl_table[] = {
+static struct ctl_table kdump_ctl_table[] = {
 	{
 		.procname = "kdump_on_init",
 		.data = &kdump_on_init,
@@ -255,7 +255,7 @@ static ctl_table kdump_ctl_table[] = {
 	{ }
 };
 
-static ctl_table sys_table[] = {
+static struct ctl_table sys_table[] = {
 	{
 	  .procname = "kernel",
 	  .mode = 0555,
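The crash.c change is purely spelling: ctl_table was a typedef for struct ctl_table, and the typedef was being phased out of the tree. For reference, the resulting style, sketched with a hypothetical knob:

    static int example_knob;	/* hypothetical */

    static struct ctl_table example_table[] = {
    	{
    		.procname	= "example_knob",
    		.data		= &example_knob,
    		.maxlen		= sizeof(int),
    		.mode		= 0644,
    		.proc_handler	= proc_dointvec,
    	},
    	{ }	/* empty sentinel entry terminates the table */
    };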
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f034563aeae..741b99c1a0b 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -44,10 +44,15 @@
 
 #define EFI_DEBUG	0
 
+static __initdata unsigned long palo_phys;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+	{PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, "PALO", &palo_phys},
+	{NULL_GUID, NULL, 0},
+};
+
 extern efi_status_t efi_call_phys (void *, ...);
 
-struct efi efi;
-EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
 static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
@@ -423,9 +428,9 @@ static u8 __init palo_checksum(u8 *buffer, u32 length)
  * Parse and handle PALO table which is published at:
  * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
  */
-static void __init handle_palo(unsigned long palo_phys)
+static void __init handle_palo(unsigned long phys_addr)
 {
-	struct palo_table *palo = __va(palo_phys);
+	struct palo_table *palo = __va(phys_addr);
 	u8 checksum;
 
 	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
@@ -467,12 +472,13 @@ void __init
 efi_init (void)
 {
 	void *efi_map_start, *efi_map_end;
-	efi_config_table_t *config_tables;
 	efi_char16_t *c16;
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
 	int i;
-	unsigned long palo_phys;
+
+	set_bit(EFI_BOOT, &efi.flags);
+	set_bit(EFI_64BIT, &efi.flags);
 
 	/*
 	 * It's too early to be able to use the standard kernel command line
@@ -514,8 +520,6 @@ efi_init (void)
 	       efi.systab->hdr.revision >> 16,
 	       efi.systab->hdr.revision & 0xffff);
 
-	config_tables = __va(efi.systab->tables);
-
 	/* Show what we know for posterity */
 	c16 = __va(efi.systab->fw_vendor);
 	if (c16) {
@@ -528,43 +532,12 @@ efi_init (void)
 	       efi.systab->hdr.revision >> 16,
 	       efi.systab->hdr.revision & 0xffff, vendor);
 
-	efi.mps = EFI_INVALID_TABLE_ADDR;
-	efi.acpi = EFI_INVALID_TABLE_ADDR;
-	efi.acpi20 = EFI_INVALID_TABLE_ADDR;
-	efi.smbios = EFI_INVALID_TABLE_ADDR;
-	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
-	efi.boot_info = EFI_INVALID_TABLE_ADDR;
-	efi.hcdp = EFI_INVALID_TABLE_ADDR;
-	efi.uga = EFI_INVALID_TABLE_ADDR;
+	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
 	palo_phys = EFI_INVALID_TABLE_ADDR;
 
-	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
-		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
-			efi.mps = config_tables[i].table;
-			printk(" MPS=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
-			efi.acpi20 = config_tables[i].table;
-			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
-			efi.acpi = config_tables[i].table;
-			printk(" ACPI=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
-			efi.smbios = config_tables[i].table;
-			printk(" SMBIOS=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
-			efi.sal_systab = config_tables[i].table;
-			printk(" SALsystab=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
-			efi.hcdp = config_tables[i].table;
-			printk(" HCDP=0x%lx", config_tables[i].table);
-		} else if (efi_guidcmp(config_tables[i].guid,
-			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
-			palo_phys = config_tables[i].table;
-			printk(" PALO=0x%lx", config_tables[i].table);
-		}
-	}
-	printk("\n");
+	if (efi_config_init(arch_tables) != 0)
+		return;
 
 	if (palo_phys != EFI_INVALID_TABLE_ADDR)
 		handle_palo(palo_phys);
@@ -689,6 +662,8 @@ efi_enter_virtual_mode (void)
 		return;
 	}
 
+	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
 	/*
 	 * Now that EFI is in virtual mode, we call the EFI functions more
 	 * efficiently:
@@ -1116,11 +1091,6 @@ efi_memmap_init(u64 *s, u64 *e)
 		if (!is_memory_available(md))
 			continue;
 
-#ifdef CONFIG_CRASH_DUMP
-		/* saved_max_pfn should ignore max_addr= command line arg */
-		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
-			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
 		/*
 		 * Round ends inward to granule boundaries
 		 * Give trimmings to uncached allocator
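Rather than walking efi.systab->tables by hand, efi_init() now hands the generic efi_config_init() a table of the architecture-specific GUIDs it wants resolved; the common code matches GUIDs, stores each table's physical address through the supplied pointer, and prints the summary line that used to be open-coded here. The shape of the mechanism, sketched with a made-up GUID name:

    static __initdata unsigned long my_table_phys;

    static __initdata efi_config_table_type_t my_arch_tables[] = {
    	{MY_VENDOR_TABLE_GUID, "MYTBL", &my_table_phys},	/* hypothetical */
    	{NULL_GUID, NULL, 0},					/* sentinel */
    };

    /* in the arch's efi_init(): */
    if (efi_config_init(my_arch_tables) != 0)
    	return;		/* firmware config tables could not be mapped */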
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
index bac1639bc32..04bc8fd5f89 100644
--- a/arch/ia64/kernel/elfcore.c
+++ b/arch/ia64/kernel/elfcore.c
@@ -11,8 +11,7 @@ Elf64_Half elf_core_extra_phdrs(void)
 	return GATE_EHDR->e_phnum;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-			       unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -35,15 +34,13 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
 			phdr.p_offset += ofs;
 		}
 		phdr.p_paddr = 0; /* match other core phdrs */
-		*size += sizeof(phdr);
-		if (*size > limit || !dump_write(file, &phdr, sizeof(phdr)))
+		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
 			return 0;
 	}
 	return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-			      unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -54,8 +51,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size,
 			void *addr = (void *)gate_phdrs[i].p_vaddr;
 			size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
 
-			*size += memsz;
-			if (*size > limit || !dump_write(file, addr, memsz))
+			if (!dump_emit(cprm, addr, memsz))
 				return 0;
 			break;
 		}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7a53530f22c..ba3d03503e8 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1169,21 +1169,8 @@ skip_rbs_switch:
 .work_pending:
 	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// is resched not needed?
(p6)	br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
-	;;
-(pKStk) st4 [r20]=r21
-#endif
-	SSM_PSR_I(p0, p6, r2)	// enable interrupts
-	br.call.spnt.many rp=schedule
+	br.call.spnt.many rp=preempt_schedule_irq
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	RSM_PSR_I(p0, r2, r20)	// disable interrupts
-	;;
-#ifdef CONFIG_PREEMPT
-(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-	;;
-(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
-#endif
(pLvSys)br.cond.sptk.few  __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
@@ -1786,6 +1773,9 @@ sys_call_table:
 	data8 sys_process_vm_writev
 	data8 sys_accept4
 	data8 sys_finit_module			// 1335
+	data8 sys_sched_setattr
+	data8 sys_sched_getattr
+	data8 sys_renameat2
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
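The entry.S hunk deletes the hand-rolled "set PREEMPT_ACTIVE, enable interrupts, call schedule(), disable interrupts, clear PREEMPT_ACTIVE" sequence and calls preempt_schedule_irq() instead, which performs that dance in C for every architecture. Simplified, what the helper does internally (the real version also handles context tracking):

    /* roughly kernel/sched/core.c:preempt_schedule_irq() of this era */
    do {
    	__preempt_count_add(PREEMPT_ACTIVE);	/* block recursive preemption */
    	local_irq_enable();
    	__schedule();
    	local_irq_disable();
    	__preempt_count_sub(PREEMPT_ACTIVE);
    } while (need_resched());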
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317a1ec..0c161ed6d18 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -225,17 +225,17 @@ static struct attribute_group err_inject_attr_group = {
 	.name = "err_inject"
 };
 
 /* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct device * sys_dev)
+static int err_inject_add_dev(struct device *sys_dev)
 {
 	return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
 }
 
-static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
+static int err_inject_remove_dev(struct device *sys_dev)
 {
 	sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
 	return 0;
 }
-static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+static int err_inject_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+static struct notifier_block err_inject_cpu_notifier =
 {
 	.notifier_call = err_inject_cpu_callback,
 };
@@ -269,12 +269,17 @@ err_inject_init(void)
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_INFO "Enter error injection driver.\n");
 #endif
+
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i) {
 		err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
 				(void *)(long)i);
 	}
 
-	register_hotcpu_notifier(&err_inject_cpu_notifier);
+	__register_hotcpu_notifier(&err_inject_cpu_notifier);
+
+	cpu_notifier_register_done();
 
 	return 0;
 }
@@ -288,11 +293,17 @@ err_inject_exit(void)
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_INFO "Exit error injection driver.\n");
 #endif
+
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i) {
 		sys_dev = get_cpu_device(i);
 		sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
 	}
-	unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+	__unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+
+	cpu_notifier_register_done();
 }
 
 module_init(err_inject_init);
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index 7fc8c961b1f..3b0c2aa0785 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -198,9 +198,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
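err_inject.c — like palinfo.c, salinfo.c and topology.c further down — picks up the same hotplug fix: setting up state for each online CPU and only then registering a notifier leaves a window in which a CPU can come or go unnoticed. The new begin/done pair holds CPU hotplug off across both steps, and the double-underscore register variant is for callers already holding that lock. The pattern, roughly:

    cpu_notifier_register_begin();		/* CPUs cannot come or go now */

    for_each_online_cpu(i)
    	setup_cpu_state(i);			/* hypothetical per-CPU setup */

    __register_hotcpu_notifier(&my_notifier);	/* lock already held */

    cpu_notifier_register_done();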
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d..a4acddad0c7 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -259,7 +259,7 @@ start_ap:
 	 * Switch into virtual mode:
 	 */
 	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-		  |IA64_PSR_DI|IA64_PSR_AC)
+		  |IA64_PSR_DI)
 	;;
 	mov cr.ipsr=r16
 	movl r17=1f
@@ -416,8 +416,6 @@ start_ap:
 
 default_setup_hook = 0		// Currently nothing needs to be done.
 
-	.weak xen_setup_hook
-
 	.global hypervisor_type
 hypervisor_type:
 	data8		PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@@ -426,7 +424,6 @@ hypervisor_type:
 
 hypervisor_setup_hooks:
 	data8		default_setup_hook
-	data8		xen_setup_hook
 num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
 	.previous
 
@@ -1035,7 +1032,7 @@ END(ia64_delay_loop)
 * Return a CPU-local timestamp in nano-seconds.  This timestamp is
 * NOT synchronized across CPUs its return value must never be
 * compared against the values returned on another CPU.  The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
 *
 * The return-value of sched_clock() is NOT supposed to wrap-around.
 * If it did, it would cause some scheduling hiccups (at the worst).
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 19f107be734..cd44a57c73b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -735,7 +735,7 @@ iosapic_register_intr (unsigned int gsi,
 	rte = find_rte(irq, gsi);
 	if(iosapic_intr_info[irq].count == 0) {
 		assign_irq_vector(irq);
-		dynamic_irq_init(irq);
+		irq_init_desc(irq);
 	} else if (rte->refcnt != NO_REF_RTE) {
 		rte->refcnt++;
 		goto unlock_iosapic_lock;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1034884b77d..03ea78ed64a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -93,14 +93,6 @@ static int irq_status[NR_IRQS] = {
 	[0 ... NR_IRQS -1] = IRQ_UNUSED
 };
 
-int check_irq_used(int irq)
-{
-	if (irq_status[irq] == IRQ_USED)
-		return 1;
-
-	return -1;
-}
-
 static inline int find_unassigned_irq(void)
 {
 	int irq;
@@ -364,7 +356,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_move_irqaction = {
 	.handler =	smp_irq_move_cleanup_interrupt,
-	.flags =	IRQF_DISABLED,
 	.name =		"irq_move"
 };
 
@@ -391,8 +382,7 @@ void destroy_and_reserve_irq(unsigned int irq)
 {
 	unsigned long flags;
 
-	dynamic_irq_cleanup(irq);
-
+	irq_init_desc(irq);
 	spin_lock_irqsave(&vector_lock, flags);
 	__clear_irq_vector(irq);
 	irq_status[irq] = IRQ_RSVD;
@@ -425,13 +415,13 @@ int create_irq(void)
 out:
 	spin_unlock_irqrestore(&vector_lock, flags);
 	if (irq >= 0)
-		dynamic_irq_init(irq);
+		irq_init_desc(irq);
 	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
-	dynamic_irq_cleanup(irq);
+	irq_init_desc(irq);
 	clear_irq_vector(irq);
 }
 
@@ -489,14 +479,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 		int irq = local_vector_to_irq(vector);
-		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			scheduler_ipi();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -549,13 +538,12 @@ void ia64_process_pending_intr(void)
 	  */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 		int irq = local_vector_to_irq(vector);
-		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
@@ -602,7 +590,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
-	.flags =	IRQF_DISABLED,
 	.name =		"IPI"
 };
 
@@ -611,13 +598,11 @@ static struct irqaction ipi_irqaction = {
 */
 static struct irqaction resched_irqaction = {
 	.handler =	dummy_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
 
 static struct irqaction tlb_irqaction = {
 	.handler =	dummy_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"tlb_flush"
 };
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 689ffcaa284..18e794a5724 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -58,7 +58,7 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 
-#if 1
+#if 0
 # define PSR_DEFAULT_BITS	psr.ac
 #else
 # define PSR_DEFAULT_BITS	0
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f8280a766a7..074fde49c9e 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -947,7 +947,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	case KPROBE_HIT_SSDONE:
 		/*
 		 * We increment the nmissed count for accounting,
-		 * we can also use npre/npostfault count for accouting
+		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
 		kprobes_inc_nmissed_count(cur);
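In the irq_ia64.c hunks the per-CPU interrupt statistics bump no longer requires the caller to fetch the descriptor: kstat_incr_irq_this_cpu() looks it up from the irq number inside the genirq core, so the irq_to_desc() call and the desc local disappear. In outline:

    int irq = local_vector_to_irq(vector);
    kstat_incr_irq_this_cpu(irq);
    /* previously: kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)) */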
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d7396dbb07b..db7b36bb068 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -217,7 +217,7 @@ void ia64_mca_printk(const char *fmt, ...)
 	/* Copy the output into mlogbuf */
 	if (oops_in_progress) {
 		/* mlogbuf was abandoned, use printk directly instead. */
-		printk(temp_buf);
+		printk("%s", temp_buf);
 	} else {
 		spin_lock(&mlogbuf_wlock);
 		for (p = temp_buf; *p; p++) {
@@ -268,7 +268,7 @@ void ia64_mlogbuf_dump(void)
 		}
 		*p = '\0';
 		if (temp_buf[0])
-			printk(temp_buf);
+			printk("%s", temp_buf);
 		mlogbuf_start = index;
 
 		mlogbuf_timestamp = 0;
@@ -631,7 +631,7 @@ ia64_mca_register_cpev (int cpev)
 *  Outputs
 *	None
 */
-void __cpuinit
+void
 ia64_mca_cmc_vector_setup (void)
 {
 	cmcv_reg_t	cmcv;
@@ -1772,38 +1772,32 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
 static struct irqaction cmci_irqaction = {
 	.handler =	ia64_mca_cmc_int_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
 	.handler =	ia64_mca_cmc_int_caller,
-	.flags =	IRQF_DISABLED,
 	.name =		"cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
 	.handler =	ia64_mca_rendez_int_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
 	.handler =	ia64_mca_wakeup_int_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
 	.handler =	ia64_mca_cpe_int_handler,
-	.flags =	IRQF_DISABLED,
 	.name =		"cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
 	.handler =	ia64_mca_cpe_int_caller,
-	.flags =	IRQF_DISABLED,
 	.name =		"cpe_poll"
 };
 #endif /* CONFIG_ACPI */
@@ -1814,7 +1808,7 @@ static struct irqaction mca_cpep_irqaction = {
 * format most of the fields.
 */
 
-static void __cpuinit
+static void
 format_mca_init_stack(void *mca_data, unsigned long offset,
 		const char *type, int cpu)
 {
@@ -1844,7 +1838,7 @@ static void * __init_refok mca_bootmem(void)
 }
 
 /* Do per-CPU MCA-related initialization.  */
-void __cpuinit
+void
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
@@ -1896,7 +1890,7 @@ ia64_mca_cpu_init(void *cpu_data)
 							      PAGE_KERNEL));
 	}
 
-static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
+static void ia64_mca_cmc_vector_adjust(void *dummy)
 {
 	unsigned long flags;
 
@@ -1906,7 +1900,7 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
 	local_irq_restore(flags);
 }
 
-static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+static int mca_cpu_callback(struct notifier_block *nfb,
 				      unsigned long action,
 				      void *hcpu)
 {
@@ -1922,7 +1916,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+static struct notifier_block mca_cpu_notifier = {
 	.notifier_call = mca_cpu_callback
 };
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index fb2f1e62287..c430f9198d1 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -17,12 +17,9 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(*cpu_mask);
+	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
 	unsigned int irq = idata->irq;
 
-	if (!cpu_online(cpu))
-		return -1;
-
 	if (irq_prepare_move(irq, cpu))
 		return -1;
 
@@ -139,10 +136,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
 	unsigned int irq = data->irq;
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = cpumask_first(mask);
-
-	if (!cpu_online(cpu))
-		return -1;
+	int cpu = cpumask_first_and(mask, cpu_online_mask);
 
 	if (irq_prepare_move(irq, cpu))
 		return -1;
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index ee564575148..f6769cd54bd 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,15 +10,11 @@
 #include <linux/kbuild.h>
 #include <linux/threads.h>
 #include <asm/native/irq.h>
-#include <asm/xen/irq.h>
 
 void foo(void)
 {
 	union paravirt_nr_irqs_max {
 		char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
-#ifdef CONFIG_XEN
-		char xen_nr_irqs[XEN_NR_IRQS];
-#endif
 	};
 
 	DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index c93420c9740..d288cde9360 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 EXPORT_SYMBOL(node_to_cpu_mask);
 
-void __cpuinit map_cpu_to_node(int cpu, int nid)
+void map_cpu_to_node(int cpu, int nid)
 {
 	int oldnid;
 	if (nid < 0) { /* just initialize by zero */
@@ -51,7 +51,7 @@ void __cpuinit map_cpu_to_node(int cpu, int nid)
 	return;
 }
 
-void __cpuinit unmap_cpu_from_node(int cpu, int nid)
+void unmap_cpu_from_node(int cpu, int nid)
 {
 	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
 	WARN_ON(cpu_to_node_map[cpu] != nid);
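The msi_ia64.c conversion replaces pick-first-then-check with an intersection: cpumask_first_and() against cpu_online_mask can only yield an online CPU (or nr_cpu_ids when the intersection is empty), so the explicit cpu_online() test goes away. The two shapes side by side:

    /* before: may select an offline CPU, then bail out */
    int cpu = cpumask_first(mask);
    if (!cpu_online(cpu))
    	return -1;

    /* after: constrain the search up front */
    cpu = cpumask_first_and(mask, cpu_online_mask);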
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 2b3c2d79256..c39c3cd3ac3 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,7 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
 	.release	= single_release,
 };
 
-static void __cpuinit
+static void
 create_palinfo_proc_entries(unsigned int cpu)
 {
 	pal_func_cpu_u_t f;
@@ -962,7 +962,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	remove_proc_subtree(cpustr, palinfo_dir);
 }
 
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
+static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
@@ -996,13 +996,17 @@ palinfo_init(void)
 	if (!palinfo_dir)
 		return -ENOMEM;
 
+	cpu_notifier_register_begin();
+
 	/* Create palinfo dirs in /proc for all online cpus */
 	for_each_online_cpu(i) {
 		create_palinfo_proc_entries(i);
 	}
 
 	/* Register for future delivery via notify registration */
-	register_hotcpu_notifier(&palinfo_cpu_notifier);
+	__register_hotcpu_notifier(&palinfo_cpu_notifier);
+
+	cpu_notifier_register_done();
 
 	return 0;
 }
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 64d6d810c64..1ad7512b5f6 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -22,9 +22,6 @@
 
 #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
 #include <asm/native/pvchk_inst.h>
-#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
-#include <asm/xen/inst.h>
-#include <asm/xen/minstate.h>
 #else
 #include <asm/native/inst.h>
 #endif
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
index 0684aa6c650..67cffc3643a 100644
--- a/arch/ia64/kernel/paravirt_patchlist.h
+++ b/arch/ia64/kernel/paravirt_patchlist.h
@@ -20,9 +20,5 @@
 *
 */
 
-#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
-#include <asm/xen/patchlist.h>
-#else
 #include <asm/native/patchlist.h>
-#endif
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1ddcfe5ef35..992c1098c52 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -33,15 +33,6 @@ int force_iommu __read_mostly;
 
 int iommu_pass_through;
 
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
-struct device fallback_dev = {
-	.init_name = "fallback device",
-	.coherent_dma_mask = DMA_BIT_MASK(32),
-	.dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
 extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
%s\n", - entry->fmt_uuid[0], - entry->fmt_uuid[1], - entry->fmt_uuid[2], - entry->fmt_uuid[3], - entry->fmt_uuid[4], - entry->fmt_uuid[5], - entry->fmt_uuid[6], - entry->fmt_uuid[7], - entry->fmt_uuid[8], - entry->fmt_uuid[9], - entry->fmt_uuid[10], - entry->fmt_uuid[11], - entry->fmt_uuid[12], - entry->fmt_uuid[13], - entry->fmt_uuid[14], - entry->fmt_uuid[15], - entry->fmt_name); + seq_printf(m, "format : %16phD %s\n", + entry->fmt_uuid, entry->fmt_name); } spin_unlock(&pfm_buffer_fmt_lock); @@ -6409,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) static struct irqaction perfmon_irqaction = { .handler = pfm_interrupt_handler, - .flags = IRQF_DISABLED, .name = "perfmon" }; diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 4bc580af67b..ee9719eebb1 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -568,7 +568,7 @@ static const struct file_operations salinfo_data_fops = { .llseek = default_llseek, }; -static int __cpuinit +static int salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int i, cpu = (unsigned long)hcpu; @@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu return NOTIFY_OK; } -static struct notifier_block salinfo_cpu_notifier __cpuinitdata = +static struct notifier_block salinfo_cpu_notifier = { .notifier_call = salinfo_cpu_callback, .priority = 0, @@ -635,6 +635,8 @@ salinfo_init(void) (void *)salinfo_entries[i].feature); } + cpu_notifier_register_begin(); + for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; @@ -669,7 +671,9 @@ salinfo_init(void) salinfo_timer.function = &salinfo_timeout; add_timer(&salinfo_timer); - register_hotcpu_notifier(&salinfo_cpu_notifier); + __register_hotcpu_notifier(&salinfo_cpu_notifier); + + cpu_notifier_register_done(); return 0; } diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 13bfdd22afc..d86669bcdfb 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -748,7 +748,7 @@ const struct seq_operations cpuinfo_op = { #define MAX_BRANDS 8 static char brandname[MAX_BRANDS][128]; -static char * __cpuinit +static char * get_model_name(__u8 family, __u8 model) { static int overflow; @@ -778,7 +778,7 @@ get_model_name(__u8 family, __u8 model) return "Unknown"; } -static void __cpuinit +static void identify_cpu (struct cpuinfo_ia64 *c) { union { @@ -850,7 +850,7 @@ identify_cpu (struct cpuinfo_ia64 *c) * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". * 3. the minimum of the cache stride sizes for "clflush_cache_range()". */ -static void __cpuinit +static void get_cache_info(void) { unsigned long line_size, max = 1; @@ -915,10 +915,10 @@ get_cache_info(void) * cpu_init() initializes state that is per-CPU. This function acts * as a 'CPU state barrier', nothing should get across. 
*/ -void __cpuinit +void cpu_init (void) { - extern void __cpuinit ia64_mmu_init (void *); + extern void ia64_mmu_init(void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; @@ -1063,6 +1063,7 @@ check_bugs (void) static int __init run_dmi_scan(void) { dmi_scan_machine(); + dmi_memdev_walk(); dmi_set_dump_stack_arch_desc(); return 0; } diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 3637e03d228..33cab9a8adf 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -105,7 +105,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) } int -copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from) +copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 8d87168d218..547a48d78bd 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -351,7 +351,7 @@ static inline void smp_setup_percpu_timer(void) { } -static void __cpuinit +static void smp_callin (void) { int cpuid, phys_id, itc_master; @@ -442,7 +442,7 @@ smp_callin (void) /* * Activate a secondary processor. head.S calls this. */ -int __cpuinit +int start_secondary (void *unused) { /* Early console may use I/O ports */ @@ -459,7 +459,7 @@ start_secondary (void *unused) return 0; } -static int __cpuinit +static int do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) { int timeout; @@ -728,7 +728,7 @@ static inline void set_cpu_sibling_map(int cpu) } } -int __cpuinit +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int ret; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index fbaac1afb84..71c52bc7c28 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -380,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs) static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_IRQPOLL, + .flags = IRQF_IRQPOLL, .name = "timer" }; diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index dc00b2c1b42..f295f9abba4 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -135,11 +135,11 @@ struct cpu_cache_info { struct kobject kobj; }; -static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata; +static struct cpu_cache_info all_cpu_cache_info[NR_CPUS]; #define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y]) #ifdef CONFIG_SMP -static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu, +static void cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { pal_cache_shared_info_t csi; @@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu, &csi) == PAL_STATUS_SUCCESS); } #else -static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, +static void cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { cpu_set(cpu, this_leaf->shared_cpu_map); @@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = { .sysfs_ops = &cache_sysfs_ops, }; -static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) +static void cpu_cache_sysfs_exit(unsigned int cpu) { kfree(all_cpu_cache_info[cpu].cache_leaves); all_cpu_cache_info[cpu].cache_leaves = NULL; @@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) return; } -static int __cpuinit 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index dc00b2c1b42..f295f9abba4 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -135,11 +135,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };
 
-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t	csi;
@@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
 				&csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
 	.sysfs_ops	= &cache_sysfs_ops,
 };
 
-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+static void cpu_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(all_cpu_cache_info[cpu].cache_leaves);
 	all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 	return;
 }
 
-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+static int cpu_cache_sysfs_init(unsigned int cpu)
 {
 	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
@@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 }
 
 /* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device * sys_dev)
+static int cache_add_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev)
 }
 
 /* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct device * sys_dev)
+static int cache_remove_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev)
 * When a cpu is hot-plugged, do a check and initiate
 * cache kobject if necessary
 */
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
 {
 	.notifier_call = cache_cpu_callback
 };
@@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void)
 {
 	int i;
 
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i) {
 		struct device *sys_dev = get_cpu_device((unsigned int)i);
 		cache_add_dev(sys_dev);
 	}
 
-	register_hotcpu_notifier(&cache_cpu_notifier);
+	__register_hotcpu_notifier(&cache_cpu_notifier);
+
+	cpu_notifier_register_done();
 
 	return 0;
 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f7f9f9c6caf..d3636e67a98 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -630,7 +630,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
 		       iip, ifa, isr);
 		force_sig(SIGSEGV, current);
-		break;
+		return;
 
 	      case 46:
 		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a73..20e8a9b21d7 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0ccb28fab27..84f8a52ac5a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -182,12 +182,6 @@ SECTIONS {
 		__start_gate_section = .;
 		*(.data..gate)
 		__stop_gate_section = .;
-#ifdef CONFIG_XEN
-		. = ALIGN(PAGE_SIZE);
-		__xen_start_gate_section = .;
-		*(.data..gate.xen)
-		__xen_stop_gate_section = .;
-#endif
 	}
 	/*
 	 * make sure the gate page doesn't expose
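The uncached.c fix is subtler than it looks: GFP_THISNODE (no leading underscores) was a composite flag, in that era roughly __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY, intended for the slab allocator's internal node-local attempts. A caller that simply wants to pin an ordinary allocation to a node should use the bare __GFP_THISNODE bit and keep normal retry and warning behavior. Assuming those definitions:

    /* composite, for allocator internals -- also disables warnings/retries:
     *   GFP_THISNODE == __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY
     *
     * node-local allocation that otherwise behaves like GFP_KERNEL: */
    page = alloc_pages_exact_node(nid,
    			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
    			IA64_GRANULE_SHIFT - PAGE_SHIFT);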
