Diffstat (limited to 'arch/x86/kernel')
26 files changed, 250 insertions, 151 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 098ec84b8c0..f2870920f24 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0014714ea97..306e5e88fb6 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -212,7 +212,7 @@ struct apic apic_flat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
@@ -362,7 +362,7 @@ struct apic apic_physflat = {
 	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
 	.wait_for_init_deassert		= NULL,
 	.smp_callin_clear_local_apic	= NULL,
-	.inquire_remote_apic		= NULL,
+	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
 	.write				= native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 767fe7e46d6..a2789e42e16 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2524,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp)
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
 
-#ifdef CONFIG_X86_X2APIC
 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -2558,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_X86_X2APIC
 static void ack_x2apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2634,6 +2634,9 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	if (irq_remapped(irq))
+		eoi_ioapic_irq(desc);
+
 	/* Now we can move and re-enable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
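Note: the X86_FEATURE_ARAT test in the apic.c hunk keys off the Always Running APIC Timer bit, which the addon_cpuid_features.c hunk below scatters from CPUID leaf 0x6, EAX bit 2. A minimal user-space sketch of the same detection, for illustration only (the helper name here is ours, not kernel API):

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang built-in __get_cpuid() */

/*
 * ARAT: the APIC timer keeps running in deep C-states, so the LAPIC
 * clockevent no longer needs the CLOCK_EVT_FEAT_C3STOP workaround.
 */
static int cpu_has_arat(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x06, &eax, &ebx, &ecx, &edx))
		return 0;			/* leaf not supported */
	return (eax >> 2) & 1;			/* CPUID.06H:EAX.ARAT[bit 2] */
}

int main(void)
{
	printf("ARAT: %s\n", cpu_has_arat() ? "yes" : "no");
	return 0;
}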
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e..de1a50af807 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -549,7 +549,8 @@ void __init uv_system_init(void)
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
 	int max_pnode = 0;
-	unsigned long mmr_base, present;
+	unsigned long mmr_base, present, paddr;
+	unsigned short pnode_mask;
 
 	map_low_mmrs();
 
@@ -592,6 +593,7 @@ void __init uv_system_init(void)
 		}
 	}
 
+	pnode_mask = (1 << n_val) - 1;
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +617,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +633,16 @@ void __init uv_system_init(void)
 			lcpu, blade);
 	}
 
+	/* Add blade/pnode info for nodes without cpus */
+	for_each_online_node(nid) {
+		if (uv_node_to_blade[nid] >= 0)
+			continue;
+		paddr = node_start_pfn(nid) << PAGE_SHIFT;
+		pnode = (paddr >> m_val) & pnode_mask;
+		blade = boot_pnode_to_blade(pnode);
+		uv_node_to_blade[nid] = blade;
+	}
+
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d9..63a88e1f987 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
 	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
 	iounmap(tab);
 
-	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+	printk(KERN_INFO "EFI UV System Table Revision %d\n",
+		uv_systab.revision);
 }
 
 #else	/* !CONFIG_EFI */
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849..c965e521271 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 46e29ab96c6..46e29ab96c6 100755..100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 19f6b9d27e8..ecdb682ab51 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,6 +68,7 @@ struct acpi_cpufreq_data {
 	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
+	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
@@ -152,7 +153,8 @@ struct drv_cmd {
 	u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -169,10 +171,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -191,23 +193,24 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;
 
-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
+	int this_cpu;
 
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -241,28 +244,23 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_cur {
+struct perf_pair {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf_cur, mperf_cur;
+	} aperf, mperf;
 };
 
-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_cur *cur = _cur;
+	struct perf_pair *cur = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0, 0);
-	wrmsr(MSR_IA32_MPERF, 0, 0);
-
-	return 0;
+	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
 }
 
 /*
@@ -281,52 +279,57 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_cur cur;
+	struct perf_pair readin, cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
 		return 0;
 
+	cur.aperf.whole = readin.aperf.whole -
+				per_cpu(drv_data, cpu)->saved_aperf;
+	cur.mperf.whole = readin.mperf.whole -
+				per_cpu(drv_data, cpu)->saved_mperf;
+	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
+	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+
 #ifdef __i386__
 	/*
	 * We don't want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
 		int shift_count = 7;
-		cur.aperf_cur.split.lo >>= shift_count;
-		cur.mperf_cur.split.lo >>= shift_count;
+		cur.aperf.split.lo >>= shift_count;
+		cur.mperf.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
-		perf_percent = (cur.aperf_cur.split.lo * 100) /
-				cur.mperf_cur.split.lo;
+	if (cur.aperf.split.lo && cur.mperf.split.lo)
+		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
 		int shift_count = 7;
-		cur.aperf_cur.whole >>= shift_count;
-		cur.mperf_cur.whole >>= shift_count;
+		cur.aperf.whole >>= shift_count;
+		cur.mperf.whole >>= shift_count;
 	}
 
-	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
-		perf_percent = (cur.aperf_cur.whole * 100) /
-				cur.mperf_cur.whole;
+	if (cur.aperf.whole && cur.mperf.whole)
+		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
 	else
 		perf_percent = 0;
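Note: the driver now leaves APERF/MPERF running and differences them against saved_aperf/saved_mperf instead of zeroing the MSRs on every read. A freestanding sketch of that delta arithmetic (names and types here are illustrative, not the driver's; the real code additionally shifts the deltas down so the *100 multiply cannot overflow):

#include <stdint.h>

/*
 * Effective-frequency percentage from two APERF/MPERF samples.
 * APERF ticks at the actual clock, MPERF at the maximum (TSC) clock,
 * so 100 * dAPERF / dMPERF is the average performance level since
 * the previous sample.
 */
unsigned int perf_percent(uint64_t aperf_now, uint64_t mperf_now,
			  uint64_t *saved_aperf, uint64_t *saved_mperf)
{
	uint64_t da = aperf_now - *saved_aperf;	/* unsigned: wraps mod 2^64 */
	uint64_t dm = mperf_now - *saved_mperf;

	*saved_aperf = aperf_now;
	*saved_mperf = mperf_now;

	return (da && dm) ? (unsigned int)((da * 100) / dm) : 0;
}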
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 0bd48e65a0c..ce2ed3e4aad 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
-#include <linux/kernel.h>
 
 #include <asm/msr.h>
 #include <acpi/processor.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 61df7753212..18dfa30795c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -18,9 +18,10 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
+#include <trace/syscall.h>
+
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
-#include <linux/ftrace.h>
 #include <asm/nops.h>
 #include <asm/nmi.h>
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a4..3f0019e0a22 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
 	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +756,7 @@ static int hpet_clocksource_register(void)
 	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
-	t1 = read_hpet();
+	t1 = hpet_readl(HPET_COUNTER);
 	rdtscll(start);
 
 	/*
@@ -770,7 +770,7 @@ static int hpet_clocksource_register(void)
 		rdtscll(now);
 	} while ((now - start) < 200000UL);
 
-	if (t1 == read_hpet()) {
+	if (t1 == hpet_readl(HPET_COUNTER)) {
 		printk(KERN_WARNING
 		       "HPET counter not counting. HPET disabled\n");
 		return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa5..c2e0bb0890d 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
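Note: the one-line signature changes in hpet.c and i8253.c (and in kvmclock.c, tsc.c and vmiclock_32.c further down) all track the clocksource API change that now passes the clocksource itself to ->read(), so one callback can serve several registered instances. A minimal sketch of a clocksource under the new signature (all values illustrative):

#include <linux/clocksource.h>

/* Illustrative only: ->read() now receives the struct clocksource. */
static cycle_t dummy_read(struct clocksource *cs)
{
	return (cycle_t)0;	/* a real driver samples its counter here */
}

static struct clocksource dummy_clocksource = {
	.name	= "dummy",
	.rating	= 1,	/* lowest rating: never preferred over real hardware */
	.read	= dummy_read,
	.mask	= CLOCKSOURCE_MASK(32),
};

/* registration would be: clocksource_register(&dummy_clocksource); */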
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3aaf7b9e3a8..c3fe010d74c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -65,7 +65,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "  Spurious interrupts\n");
 #endif
 	if (generic_interrupt_extension) {
-		seq_printf(p, "PLT: ");
+		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
 		seq_printf(p, "  Platform interrupts\n");
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132d..223af43f152 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+	return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
-	.read = kvm_clock_read,
+	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef31..98c470c069d 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info	ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
-struct update_for_cpu {
-	const void __user	*buf;
-	size_t			size;
-};
-
-static long update_for_cpu(void *_ufc)
-{
-	struct update_for_cpu *ufc = _ufc;
-	int error;
-
-	error = microcode_ops->request_microcode_user(smp_processor_id(),
-						      ufc->buf, ufc->size);
-	if (error < 0)
-		return error;
-	if (!error)
-		microcode_ops->apply_microcode(smp_processor_id());
-	return error;
-}
-
 static int do_microcode_update(const void __user *buf, size_t size)
 {
+	cpumask_t old;
 	int error = 0;
 	int cpu;
-	struct update_for_cpu ufc = { .buf = buf, .size = size };
+
+	old = current->cpus_allowed;
 
 	for_each_online_cpu(cpu) {
 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 		if (!uci->valid)
 			continue;
-		error = work_on_cpu(cpu, update_for_cpu, &ufc);
+
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		error = microcode_ops->request_microcode_user(cpu, buf, size);
 		if (error < 0)
-			break;
+			goto out;
+		if (!error)
+			microcode_ops->apply_microcode(cpu);
 	}
+out:
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
 
@@ -391,8 +380,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index dce99dca6cf..70fd7e414c1 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -679,7 +679,7 @@ void __init get_smp_config(void)
 	__get_smp_config(0);
 }
 
-static void smp_reserve_bootmem(struct mpf_intel *mpf)
+static void __init smp_reserve_bootmem(struct mpf_intel *mpf)
 {
 	unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
@@ -838,7 +838,7 @@ static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
 
 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 
-static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
+static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 {
 	int i;
 
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
 	}
 }
 #else /* CONFIG_X86_IO_APIC */
-static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
+static
+inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */
 
 static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
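Note: do_microcode_update() above replaces work_on_cpu() with the save/pin/work/restore affinity pattern. The same pattern in user space, as a hedged sketch (sched_setaffinity(2) plays the role of set_cpus_allowed_ptr(); the helper name is ours):

#define _GNU_SOURCE
#include <sched.h>

/*
 * Pin the calling thread to each CPU in turn, run fn there, then
 * restore the original affinity mask -- same shape as the microcode
 * update loop above.
 */
int for_each_cpu_pinned(int ncpus, void (*fn)(int cpu))
{
	cpu_set_t old, one;
	int cpu;

	if (sched_getaffinity(0, sizeof(old), &old))
		return -1;
	for (cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO(&one);
		CPU_SET(cpu, &one);
		if (sched_setaffinity(0, sizeof(one), &one))
			continue;	/* CPU may be offline */
		fn(cpu);		/* now guaranteed to run on 'cpu' */
	}
	return sched_setaffinity(0, sizeof(old), &old);
}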
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 90f5b9ef5de..745579bc825 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL(bad_dma_address);
    to older i386. */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
-	.coherent_dma_mask = DMA_32BIT_MASK,
+	.coherent_dma_mask = DMA_BIT_MASK(32),
 	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
 EXPORT_SYMBOL(x86_dma_fallback_dev);
@@ -148,7 +148,7 @@ again:
 	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
 		__free_pages(page, get_order(size));
 
-		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
+		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
 			flag = (flag & ~GFP_DMA32) | GFP_DMA;
 			goto again;
 		}
@@ -243,7 +243,7 @@ int dma_supported(struct device *dev, u64 mask)
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
-	if (mask < DMA_24BIT_MASK)
+	if (mask < DMA_BIT_MASK(24))
 		return 0;
 
 	/* Tell the device to use SAC when IOMMU force is on.  This
@@ -258,7 +258,7 @@ int dma_supported(struct device *dev, u64 mask)
 	   SAC for these.  Assume all masks <= 40 bits are of this
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
 		dev_info(dev, "Force SAC with mask %Lx\n", mask);
 		return 0;
 	}
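Note: the DMA_*BIT_MASK constants are being replaced here and below by the generic DMA_BIT_MASK(n) macro from linux/dma-mapping.h, whose standard definition special-cases n == 64 because shifting a 64-bit value by 64 is undefined behavior in C:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * So, for example:
 *   DMA_BIT_MASK(24) == 0x0000000000ffffffULL   (ISA-style 16 MB limit)
 *   DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 *   DMA_BIT_MASK(64) == 0xffffffffffffffffULL
 */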
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index c6d703b3932..71d412a09f3 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -15,7 +15,7 @@ static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
 	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
-		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
+		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
 			printk(KERN_ERR
 			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
 				name, (long long)bus, size,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996e..221a3853e26 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b32a8ee5338..d5252ae6c52 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/ftrace.h>
 #include <linux/workqueue.h>
 
 #include <asm/uaccess.h>
@@ -36,6 +35,8 @@
 #include <asm/proto.h>
 #include <asm/ds.h>
 
+#include <trace/syscall.h>
+
 #include "tls.h"
 
 enum x86_regset {
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2aef36d8aca..1340dad417f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell DXP061 */
+		.callback = set_bios_reboot,
+		.ident = "Dell DXP061",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
+		},
+	},
 	{ }
 };
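Note: the new reboot quirk is applied at boot by dmi_check_system(), which calls .callback for every table entry whose .matches strings all appear in the machine's DMI data. An illustrative table in the same style (the machine and callback names here are hypothetical):

#include <linux/dmi.h>
#include <linux/kernel.h>

static int example_quirk(const struct dmi_system_id *d)
{
	printk(KERN_INFO "applying quirk for %s\n", d->ident);
	return 0;
}

static struct dmi_system_id __initdata example_dmi_table[] = {
	{	/* hypothetical machine needing a quirk */
		.callback = example_quirk,
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }
};

/* at init time: dmi_check_system(example_dmi_table); */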
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3..ed0c33761e6 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
 
 /* position of pnode (which is nasid>>1): */
 static int uv_nshift __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
 
 static unsigned long uv_mmask __read_mostly;
 
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 
 /*
+ * Determine the first node on a blade.
+ */
+static int __init blade_to_first_node(int blade)
+{
+	int node, b;
+
+	for_each_online_node(node) {
+		b = uv_node_to_blade_id(node);
+		if (blade == b)
+			return node;
+	}
+	return -1;	/* shouldn't happen */
+}
+
+/*
+ * Determine the apicid of the first cpu on a blade.
+ */
+static int __init blade_to_first_apicid(int blade)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		if (blade == uv_cpu_to_blade_id(cpu))
+			return per_cpu(x86_cpu_to_apicid, cpu);
+	return -1;
+}
+
+/*
  * Free a software acknowledge hardware resource by clearing its Pending
  * bit. This will return a reply to the sender.
  * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
 	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
 	cpu = uv_blade_processor_id();
 	msg->number_of_cpus =
-	uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
+		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
 	this_cpu_mask = 1UL << cpu;
 	if (msp->seen_by.bits & this_cpu_mask)
 		return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
 					     struct bau_desc *bau_desc,
 					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
 	int tries = 0;
-	int blade;
+	int pnode;
 	int bit;
 	unsigned long mmr_offset;
 	unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * use the IPI method of shootdown on them.
 	 */
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		if (blade == this_blade)
+		pnode = uv_cpu_to_pnode(bit);
+		if (pnode == this_pnode)
 			continue;
 		cpumask_clear_cpu(bit, flush_mask);
 	}
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
 	int i;
 	int bit;
-	int blade;
+	int pnode;
 	int uv_cpu;
-	int this_blade;
+	int this_pnode;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
 	uv_cpu = uv_blade_processor_id();
-	this_blade = uv_numa_blade_id();
+	this_pnode = uv_hub_info->pnode;
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	i = 0;
 	for_each_cpu(bit, flush_mask) {
-		blade = uv_cpu_to_blade_id(bit);
-		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
-		if (blade == this_blade) {
+		pnode = uv_cpu_to_pnode(bit);
+		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
+		if (pnode == this_pnode) {
 			locals++;
 			continue;
 		}
-		bau_node_set(blade, &bau_desc->distribution);
+		bau_node_set(pnode - uv_partition_base_pnode,
+				&bau_desc->distribution);
 		i++;
 	}
 	if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc->payload.address = va;
 	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
+	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
 }
 
 /*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * uv_enable_timeouts
+ *
+ * Each target blade (i.e. blades that have cpus) needs to have
+ * shootdown message timeouts enabled. The timeout does not cause
+ * an interrupt, but causes an error message to be returned to
+ * the sender.
+ */
 static void uv_enable_timeouts(void)
 {
-	int i;
 	int blade;
-	int last_blade;
+	int nblades;
 	int pnode;
-	int cur_cpu = 0;
-	unsigned long apicid;
+	unsigned long mmr_image;
 
-	last_blade = -1;
-	for_each_online_node(i) {
-		blade = uv_node_to_blade_id(i);
-		if (blade == last_blade)
+	nblades = uv_num_possible_blades();
+
+	for (blade = 0; blade < nblades; blade++) {
+		if (!uv_blade_nr_possible_cpus(blade))
 			continue;
-		last_blade = blade;
-		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+
 		pnode = uv_blade_to_pnode(blade);
-		cur_cpu += uv_blade_nr_possible_cpus(i);
+		mmr_image =
+		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
+		/*
+		 * Set the timeout period and then lock it in, in three
+		 * steps; captures and locks in the period.
+		 *
+		 * To program the period, the SOFT_ACK_MODE must be off.
+		 */
+		mmr_image &= ~((unsigned long)1 <<
+			       UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Set the 4-bit period.
+		 */
+		mmr_image &= ~((unsigned long)0xf <<
+			       UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
+			      UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+		/*
+		 * Subsequent reversals of the timebase bit (3) cause an
+		 * immediate timeout of one or all INTD resources as
+		 * indicated in bits 2:0 (7 causes all of them to timeout).
+		 */
+		mmr_image |= ((unsigned long)1 <<
+			      UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+		uv_write_global_mmr64
+		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
 	}
 }
 
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 			   stat->requestee, stat->onetlb, stat->alltlb,
 			   stat->s_retry, stat->d_retry, stat->ptc_i);
 		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
-			   uv_read_global_mmr64(uv_blade_to_pnode
-					(uv_cpu_to_blade_id(cpu)),
+			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
 					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
 			   stat->sflush, stat->dflush,
 			   stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
 * finish the initialization of the per-blade control structures
 */
static void __init
-uv_table_bases_finish(int blade, int node, int cur_cpu,
+uv_table_bases_finish(int blade,
 		      struct bau_control *bau_tablesp,
 		      struct bau_desc *adp)
 {
 	struct bau_control *bcp;
-	int i;
+	int cpu;
 
-	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
-		bcp = (struct bau_control *)&per_cpu(bau_control, i);
+	for_each_present_cpu(cpu) {
+		if (blade != uv_cpu_to_blade_id(cpu))
+			continue;
+
+		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
 		bcp->bau_msg_head	= bau_tablesp->va_queue_first;
 		bcp->va_queue_first	= bau_tablesp->va_queue_first;
 		bcp->va_queue_last	= bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
-	adp = (struct bau_desc *)
-	    kmalloc_node(16384, GFP_KERNEL, node);
+	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
 	BUG_ON(!adp);
 
-	pa = __pa((unsigned long)adp);
+	pa = uv_gpa(adp); /* need the real nasid */
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
 	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
 		memset(ad2, 0, sizeof(struct bau_desc));
 		ad2->header.sw_ack_flag = 1;
-		ad2->header.base_dest_nodeid =
-		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
+		/*
+		 * base_dest_nodeid is the first node in the partition, so
+		 * the bit map will indicate partition-relative node numbers.
+		 * note that base_dest_nodeid is actually a nasid.
+		 */
+		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
 		ad2->header.command = UV_NET_ENDPOINT_INTD;
 		ad2->header.int_both = 1;
 		/*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
 uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
 	struct bau_payload_queue_entry *pqp;
+	unsigned long pa;
+	int pn;
 	char *cp;
 
 	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 	cp = (char *)pqp + 31;
 	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 	bau_tablesp->va_queue_first = pqp;
+	/*
+	 * need the pnode of where the memory was really allocated
+	 */
+	pa = uv_gpa(pqp);
+	pn = pa >> uv_nshift;
 	uv_write_global_mmr64(pnode,
 			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
-			      ((unsigned long)pnode <<
-			       UV_PAYLOADQ_PNODE_SHIFT) |
+			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
 			      uv_physnodeaddr(pqp));
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
 			      uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 /*
 * Initialization of each UV blade's structures
 */
-static int __init uv_init_blade(int blade, int node, int cur_cpu)
+static int __init uv_init_blade(int blade)
 {
+	int node;
 	int pnode;
 	unsigned long pa;
 	unsigned long apicid;
 	struct bau_desc *adp;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 	struct bau_payload_queue_entry *pqp;
 	struct bau_control *bau_tablesp;
 
+	node = blade_to_first_node(blade);
 	bau_tablesp = uv_table_bases_init(blade, node);
 	pnode = uv_blade_to_pnode(blade);
 	adp = uv_activation_descriptor_init(node, pnode);
 	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
-	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
+	uv_table_bases_finish(blade, bau_tablesp, adp);
 	/*
 	 * the below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS
 	 */
-	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
+	apicid = blade_to_first_apicid(blade);
 	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
 	if ((pa & 0xff) != UV_BAU_MESSAGE) {
 		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
 static int __init uv_bau_init(void)
 {
 	int blade;
-	int node;
 	int nblades;
-	int last_blade;
 	int cur_cpu;
 
 	if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
 	uv_bau_retry_limit = 1;
 	uv_nshift = uv_hub_info->n_val;
 	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
-	nblades = 0;
-	last_blade = -1;
-	cur_cpu = 0;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		nblades++;
-	}
+	nblades = uv_num_possible_blades();
+
 	uv_bau_table_bases = (struct bau_control **)
 	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
 	BUG_ON(!uv_bau_table_bases);
 
-	last_blade = -1;
-	for_each_online_node(node) {
-		blade = uv_node_to_blade_id(node);
-		if (blade == last_blade)
-			continue;
-		last_blade = blade;
-		uv_init_blade(blade, node, cur_cpu);
-		cur_cpu += uv_blade_nr_possible_cpus(blade);
-	}
+	uv_partition_base_pnode = 0x7fffffff;
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade) &&
+		    (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
+			uv_partition_base_pnode = uv_blade_to_pnode(blade);
+	for (blade = 0; blade < nblades; blade++)
+		if (uv_blade_nr_possible_cpus(blade))
+			uv_init_blade(blade);
+
 	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();
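Note: the tlb_uv.c changes switch the BAU distribution map from blade numbers to partition-relative pnodes: each bit is indexed by pnode - uv_partition_base_pnode, so the map still fits on partitioned systems where this partition's lowest pnode is not zero. A freestanding sketch of that indexing (the types and sizes here are illustrative, not the kernel's):

#include <stdint.h>

#define DIST_NBITS 256	/* illustrative distribution-map width */

struct distribution {
	uint64_t bits[DIST_NBITS / 64];
};

/* Set the distribution bit for a pnode, relative to the partition base. */
static void dist_set(struct distribution *d, int pnode, int base_pnode)
{
	int bit = pnode - base_pnode;	/* partition-relative index */

	d->bits[bit / 64] |= 1ULL << (bit % 64);
}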
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe636..d57de05dc43 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf80..36afb98675a 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
 #include <linux/sysdev.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
 
 struct kobject *sgi_uv_kobj;
 
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
 {
 	unsigned long ret;
 
+	if (!is_uv_system())
+		return -ENODEV;
+
 	if (!sgi_uv_kobj)
 		sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
 	if (!sgi_uv_kobj) {
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7ba..2b3eb82efee 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
 	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
 	return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 2b54fe002e9..0a5b04aa98f 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void)
 	}
 
 	/*
-	 * for now OS knows only about FP/SSE
+	 * Support only the state known to OS.
 	 */
 	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
 	xsave_init();