Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig.debug                 |  19
-rw-r--r--  arch/x86/include/asm/apic.h            |   2
-rw-r--r--  arch/x86/include/asm/entry_arch.h      |   9
-rw-r--r--  arch/x86/include/asm/irq_vectors.h     |  11
-rw-r--r--  arch/x86/include/asm/paravirt.h        |   5
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |   3
-rw-r--r--  arch/x86/include/asm/percpu.h          |  17
-rw-r--r--  arch/x86/include/asm/processor.h       |  13
-rw-r--r--  arch/x86/include/asm/smp.h             |  16
-rw-r--r--  arch/x86/include/asm/tlb.h             |   9
-rw-r--r--  arch/x86/include/asm/tlbflush.h        |  49
-rw-r--r--  arch/x86/include/asm/uv/uv.h           |   5
-rw-r--r--  arch/x86/kernel/apic/apic.c            |   6
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  31
-rw-r--r--  arch/x86/kernel/cpu/cpu.h              |   9
-rw-r--r--  arch/x86/kernel/cpu/intel.c            | 176
-rw-r--r--  arch/x86/kernel/entry_64.S             |  18
-rw-r--r--  arch/x86/kernel/irqinit.c              |  73
-rw-r--r--  arch/x86/kernel/setup_percpu.c         |   2
-rw-r--r--  arch/x86/kernel/smpboot.c              |   8
-rw-r--r--  arch/x86/mm/tlb.c                      | 401
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c          |   6
-rw-r--r--  arch/x86/xen/mmu.c                     |  12
23 files changed, 541 insertions(+), 359 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index e46c2147397..b322f124ee3 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -129,6 +129,25 @@ config DOUBLEFAULT
option saves about 4k and might cause you much additional grey
hair.
+config DEBUG_TLBFLUSH
+ bool "Set upper limit of TLB entries to flush one-by-one"
+ depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG)
+ ---help---
+
+ X86-only for now.
+
+ This option allows the user to tune the number of TLB entries the
+ kernel flushes one-by-one instead of doing a full TLB flush. In
+ certain situations, the former is cheaper. This is controlled by the
+ tlb_flushall_shift knob under /sys/kernel/debug/x86. If you set it
+ to -1, the code flushes the whole TLB unconditionally. Otherwise,
+ for positive values, the kernel invalidates individual TLB entries
+ (with invlpg) according to the following formula:
+
+ flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
+
+ If in doubt, say "N".
+
config IOMMU_DEBUG
bool "Enable IOMMU debugging"
depends on GART_IOMMU && DEBUG_KERNEL
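
As a concrete illustration of the help text's formula (not part of the patch), the decision that flush_tlb_mm_range() makes further down in arch/x86/mm/tlb.c can be modelled in a few lines of plain C; the entry count and shift value below are made-up example inputs:

#include <stdio.h>

/* Model of the tlb_flushall_shift heuristic: flush page-by-page only
 * when the number of pages is at most active_entries >> shift,
 * otherwise do a full TLB flush (cr3 reload). */
static const char *pick_flush(unsigned long nr_pages,
			      unsigned int active_entries, int shift)
{
	if (shift < 0)
		return "full flush";	/* -1 disables partial flushing */
	if (nr_pages > (active_entries >> shift))
		return "full flush";
	return "invlpg per page";
}

int main(void)
{
	/* e.g. a 512-entry dTLB with shift 6 gives a threshold of 8 pages */
	printf("  4 pages  -> %s\n", pick_flush(4, 512, 6));
	printf(" 64 pages  -> %s\n", pick_flush(64, 512, 6));
	printf("shift = -1 -> %s\n", pick_flush(4, 512, -1));
	return 0;
}
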
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3ea51a84a0e..f34261296ff 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -546,7 +546,7 @@ static inline const struct cpumask *online_target_cpus(void)
return cpu_online_mask;
}
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 0baa628e330..40afa0005c6 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -15,15 +15,6 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
- 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-BUILD_INTERRUPT3(invalidate_interrupt\idx,
- (INVALIDATE_TLB_VECTOR_START)+\idx,
- smp_invalidate_interrupt)
-.endif
-.endr
#endif
BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4b4448761e8..1508e518c7e 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -119,17 +119,6 @@
*/
#define LOCAL_TIMER_VECTOR 0xef
-/* up to 32 vectors used for spreading out TLB flushes: */
-#if NR_CPUS <= 32
-# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS)
-#else
-# define NUM_INVALIDATE_TLB_VECTORS (32)
-#endif
-
-#define INVALIDATE_TLB_VECTOR_END (0xee)
-#define INVALIDATE_TLB_VECTOR_START \
- (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
-
#define NR_VECTORS 256
#define FPU_IRQ 13
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 0b47ddb6f00..a0facf3908d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -360,9 +360,10 @@ static inline void __flush_tlb_single(unsigned long addr)
static inline void flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va)
+ unsigned long start,
+ unsigned long end)
{
- PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+ PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8613cbb7ba4..142236ed83a 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -248,7 +248,8 @@ struct pv_mmu_ops {
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus,
struct mm_struct *mm,
- unsigned long va);
+ unsigned long start,
+ unsigned long end);
/* Hooks for allocating and freeing a pagetable top-level */
int (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index d9b8e3f7f42..1104afaba52 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -551,6 +551,12 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
+ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
+ { [0 ... NR_CPUS-1] = _initvalue }; \
+ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
@@ -559,6 +565,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
+ extern __typeof__(_type) *_name##_early_ptr; \
+ extern __typeof__(_type) _name##_early_map[]
+
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
@@ -570,12 +581,18 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
+
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name)
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
+
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */
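
The new _READ_MOSTLY variants keep the existing early-per-cpu calling convention; only the section the runtime per-cpu copy lands in changes. The define/declare lines below are taken from later hunks of this patch (apic.c and smp.h); the reader helper is a hypothetical sketch of how existing callers access the value:

/* Definition, from arch/x86/kernel/apic/apic.c in this patch: */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Declaration, from arch/x86/include/asm/smp.h in this patch: */
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);

/* Hypothetical reader: early_per_cpu() uses the __initdata _early_map[]
 * until the per-cpu areas are set up, then the per-cpu copy. */
static u16 demo_cpu_apicid(int cpu)
{
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}
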
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 39bc5777211..d048cad9bca 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -61,6 +61,19 @@ static inline void *current_text_addr(void)
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
+enum tlb_infos {
+ ENTRIES,
+ NR_INFO
+};
+
+extern u16 __read_mostly tlb_lli_4k[NR_INFO];
+extern u16 __read_mostly tlb_lli_2m[NR_INFO];
+extern u16 __read_mostly tlb_lli_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4k[NR_INFO];
+extern u16 __read_mostly tlb_lld_2m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern s8 __read_mostly tlb_flushall_shift;
+
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
* Members of this structure are referenced in head.S, so think twice
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2ffa95dc233..4f19a152603 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -31,12 +31,12 @@ static inline bool cpu_has_ht_siblings(void)
return has_siblings;
}
-DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
-DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
-DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -53,10 +53,10 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
return per_cpu(cpu_llc_shared_map, cpu);
}
-DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif
/* Static state in head.S used to set up a CPU */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 829215fef9e..4fef20773b8 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb) \
+{ \
+ if (tlb->fullmm == 0) \
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
+ else \
+ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
+}
#include <asm-generic/tlb.h>
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 36a1a2ab87d..74a44333545 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr)
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
*/
#ifndef CONFIG_SMP
@@ -109,9 +105,17 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end, unsigned long vmflag)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va)
+ unsigned long start,
+ unsigned long end)
{
}
@@ -119,27 +123,35 @@ static inline void reset_lazy_tlbstate(void)
{
}
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
#else /* SMP */
#include <asm/smp.h>
#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end) \
+ flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long va);
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
@@ -159,13 +171,8 @@ static inline void reset_lazy_tlbstate(void)
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end) \
+ native_flush_tlb_others(mask, mm, start, end)
#endif
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
-
#endif /* _ASM_X86_TLBFLUSH_H */
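
With flush_tlb_mm() and flush_tlb_range() reduced to wrappers, a caller that knows the affected virtual range hands it straight to flush_tlb_mm_range(), which then chooses between per-page invlpg and a full flush. A hedged sketch of such a caller follows; the helper name and the zero vmflag are illustrative, not from the patch:

#include <asm/tlbflush.h>

/* Hypothetical caller: tear down 'nr' pages at 'addr' in 'mm' and
 * flush only that range on all CPUs that have run the mm. */
static void demo_zap_and_flush(struct mm_struct *mm,
			       unsigned long addr, unsigned long nr)
{
	/* ...page-table teardown elided... */
	flush_tlb_mm_range(mm, addr, addr + nr * PAGE_SIZE, 0UL);
}
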
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 3bb9491b765..b47c2a82ff1 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void);
extern void uv_system_init(void);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va,
+ unsigned long start,
+ unsigned end,
unsigned int cpu);
#else /* X86_UV */
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
- unsigned long va, unsigned int cpu)
+ unsigned long start, unsigned long end, unsigned int cpu)
{ return cpumask; }
#endif /* X86_UV */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 98e24131ff3..24deb308232 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -75,8 +75,8 @@ physid_mask_t phys_cpu_present_map;
/*
* Map cpu index to physical APIC ID
*/
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
@@ -88,7 +88,7 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
* used for the mapping. This is where the behaviors of x86_64 and 32
* actually diverge. Let's keep it ugly for now.
*/
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
/*
* Knob to control our willingness to enable the local APIC.
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5bbc082c47a..46d8786d655 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -452,6 +452,35 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
c->x86_cache_size = l2size;
}
+u16 __read_mostly tlb_lli_4k[NR_INFO];
+u16 __read_mostly tlb_lli_2m[NR_INFO];
+u16 __read_mostly tlb_lli_4m[NR_INFO];
+u16 __read_mostly tlb_lld_4k[NR_INFO];
+u16 __read_mostly tlb_lld_2m[NR_INFO];
+u16 __read_mostly tlb_lld_4m[NR_INFO];
+
+/*
+ * tlb_flushall_shift sets the balance point for replacing a cr3 write
+ * (full flush) with multiple 'invlpg' instructions.  The replacement is
+ * done when
+ *   flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
+ * A tlb_flushall_shift of -1 disables the replacement.
+ */
+s8 __read_mostly tlb_flushall_shift = -1;
+
+void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+ if (this_cpu->c_detect_tlb)
+ this_cpu->c_detect_tlb(c);
+
+ printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+ "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+ "tlb_flushall_shift is 0x%x\n",
+ tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
+ tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
+ tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
+ tlb_flushall_shift);
+}
+
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
@@ -911,6 +940,8 @@ void __init identify_boot_cpu(void)
#else
vgetcpu_set_mode();
#endif
+ if (boot_cpu_data.cpuid_level >= 2)
+ cpu_detect_tlb(&boot_cpu_data);
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 8bacc7826fb..4041c24ae7d 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -20,10 +20,19 @@ struct cpu_dev {
void (*c_bsp_init)(struct cpuinfo_x86 *);
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
+ void (*c_detect_tlb)(struct cpuinfo_x86 *);
unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
int c_x86_vendor;
};
+struct _tlb_table {
+ unsigned char descriptor;
+ char tlb_type;
+ unsigned int entries;
+ /* unsigned int ways; */
+ char info[128];
+};
+
#define cpu_dev_register(cpu_devX) \
static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
__attribute__((__section__(".x86_cpu_dev.init"))) = \
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3e6ff6cbf42..0a4ce2980a5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -491,6 +491,181 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
}
#endif
+#define TLB_INST_4K 0x01
+#define TLB_INST_4M 0x02
+#define TLB_INST_2M_4M 0x03
+
+#define TLB_INST_ALL 0x05
+#define TLB_INST_1G 0x06
+
+#define TLB_DATA_4K 0x11
+#define TLB_DATA_4M 0x12
+#define TLB_DATA_2M_4M 0x13
+#define TLB_DATA_4K_4M 0x14
+
+#define TLB_DATA_1G 0x16
+
+#define TLB_DATA0_4K 0x21
+#define TLB_DATA0_4M 0x22
+#define TLB_DATA0_2M_4M 0x23
+
+#define STLB_4K 0x41
+
+static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+ { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
+ { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
+ { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
+ { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
+ { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
+ { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
+ { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
+ { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+ { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+ { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
+ { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
+ { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
+ { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
+ { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
+ { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
+ { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
+ { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
+ { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
+ { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
+ { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
+ { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
+ { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+ { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
+ { 0x00, 0, 0 }
+};
+
+static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+{
+ unsigned char k;
+ if (desc == 0)
+ return;
+
+ /* look up this descriptor in the table */
+ for (k = 0; intel_tlb_table[k].descriptor != desc && \
+ intel_tlb_table[k].descriptor != 0; k++)
+ ;
+
+ if (intel_tlb_table[k].tlb_type == 0)
+ return;
+
+ switch (intel_tlb_table[k].tlb_type) {
+ case STLB_4K:
+ if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_INST_ALL:
+ if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_INST_4K:
+ if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_INST_4M:
+ if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_INST_2M_4M:
+ if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_DATA_4K:
+ case TLB_DATA0_4K:
+ if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_DATA_4M:
+ case TLB_DATA0_4M:
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_DATA_2M_4M:
+ case TLB_DATA0_2M_4M:
+ if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ case TLB_DATA_4K_4M:
+ if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
+ }
+}
+
+static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has_invlpg) {
+ tlb_flushall_shift = -1;
+ return;
+ }
+ switch ((c->x86 << 8) + c->x86_model) {
+ case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+ case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+ case 0x61d: /* six-core 45 nm xeon "Dunnington" */
+ tlb_flushall_shift = -1;
+ break;
+ case 0x61a: /* 45 nm nehalem, "Bloomfield" */
+ case 0x61e: /* 45 nm nehalem, "Lynnfield" */
+ case 0x625: /* 32 nm nehalem, "Clarkdale" */
+ case 0x62c: /* 32 nm nehalem, "Gulftown" */
+ case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
+ case 0x62f: /* 32 nm Xeon E7 */
+ tlb_flushall_shift = 6;
+ break;
+ case 0x62a: /* SandyBridge */
+ case 0x62d: /* SandyBridge, "Romley-EP" */
+ tlb_flushall_shift = 5;
+ break;
+ case 0x63a: /* Ivybridge */
+ tlb_flushall_shift = 1;
+ break;
+ default:
+ tlb_flushall_shift = 6;
+ }
+}
+
+static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
+{
+ int i, j, n;
+ unsigned int regs[4];
+ unsigned char *desc = (unsigned char *)regs;
+ /* Number of times to iterate */
+ n = cpuid_eax(2) & 0xFF;
+
+ for (i = 0 ; i < n ; i++) {
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+ for (j = 0 ; j < 3 ; j++)
+ if (regs[j] & (1 << 31))
+ regs[j] = 0;
+
+ /* Byte 0 is level count, not a descriptor */
+ for (j = 1 ; j < 16 ; j++)
+ intel_tlb_lookup(desc[j]);
+ }
+ intel_tlb_flushall_shift_set(c);
+}
+
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
@@ -546,6 +721,7 @@ static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
},
.c_size_cache = intel_size_cache,
#endif
+ .c_detect_tlb = intel_detect_tlb,
.c_early_init = early_init_intel,
.c_init = init_intel,
.c_x86_vendor = X86_VENDOR_INTEL,
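
The descriptor walk in intel_detect_tlb() above follows the CPUID leaf 2 convention: the low byte of EAX is a repeat count, bit 31 of a register means it carries no descriptors, and every remaining non-zero byte is a descriptor looked up in intel_tlb_table[]. The same walk can be reproduced from user space; the sketch below (GCC/Clang on x86, not part of the patch) only prints the raw descriptor bytes rather than mapping them to TLB sizes:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;
	int i, j, n;

	if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
		return 1;			/* leaf 2 not supported */

	n = regs[0] & 0xff;			/* times to re-read leaf 2 */
	for (i = 0; i < n; i++) {
		__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		for (j = 0; j < 4; j++)		/* bit 31: register holds no descriptors */
			if (regs[j] & (1u << 31))
				regs[j] = 0;

		for (j = 1; j < 16; j++)	/* byte 0 of EAX is the repeat count */
			if (desc[j])
				printf("descriptor 0x%02x\n", desc[j]);
	}
	return 0;
}
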
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 111f6bbd8b3..69babd8c834 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1048,24 +1048,6 @@ apicinterrupt LOCAL_TIMER_VECTOR \
apicinterrupt X86_PLATFORM_IPI_VECTOR \
x86_platform_ipi smp_x86_platform_ipi
-#ifdef CONFIG_SMP
- ALIGN
- INTR_FRAME
-.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
- 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-ENTRY(invalidate_interrupt\idx)
- pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
- jmp .Lcommon_invalidate_interrupt0
- CFI_ADJUST_CFA_OFFSET -8
-END(invalidate_interrupt\idx)
-.endif
-.endr
- CFI_ENDPROC
-apicinterrupt INVALIDATE_TLB_VECTOR_START, \
- invalidate_interrupt0, smp_invalidate_interrupt
-#endif
-
apicinterrupt THRESHOLD_APIC_VECTOR \
threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 252981afd6c..6e03b0d6913 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -171,79 +171,6 @@ static void __init smp_intr_init(void)
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
- /* IPIs for invalidation */
-#define ALLOC_INVTLB_VEC(NR) \
- alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
- invalidate_interrupt##NR)
-
- switch (NUM_INVALIDATE_TLB_VECTORS) {
- default:
- ALLOC_INVTLB_VEC(31);
- case 31:
- ALLOC_INVTLB_VEC(30);
- case 30:
- ALLOC_INVTLB_VEC(29);
- case 29:
- ALLOC_INVTLB_VEC(28);
- case 28:
- ALLOC_INVTLB_VEC(27);
- case 27:
- ALLOC_INVTLB_VEC(26);
- case 26:
- ALLOC_INVTLB_VEC(25);
- case 25:
- ALLOC_INVTLB_VEC(24);
- case 24:
- ALLOC_INVTLB_VEC(23);
- case 23:
- ALLOC_INVTLB_VEC(22);
- case 22:
- ALLOC_INVTLB_VEC(21);
- case 21:
- ALLOC_INVTLB_VEC(20);
- case 20:
- ALLOC_INVTLB_VEC(19);
- case 19:
- ALLOC_INVTLB_VEC(18);
- case 18:
- ALLOC_INVTLB_VEC(17);
- case 17:
- ALLOC_INVTLB_VEC(16);
- case 16:
- ALLOC_INVTLB_VEC(15);
- case 15:
- ALLOC_INVTLB_VEC(14);
- case 14:
- ALLOC_INVTLB_VEC(13);
- case 13:
- ALLOC_INVTLB_VEC(12);
- case 12:
- ALLOC_INVTLB_VEC(11);
- case 11:
- ALLOC_INVTLB_VEC(10);
- case 10:
- ALLOC_INVTLB_VEC(9);
- case 9:
- ALLOC_INVTLB_VEC(8);
- case 8:
- ALLOC_INVTLB_VEC(7);
- case 7:
- ALLOC_INVTLB_VEC(6);
- case 6:
- ALLOC_INVTLB_VEC(5);
- case 5:
- ALLOC_INVTLB_VEC(4);
- case 4:
- ALLOC_INVTLB_VEC(3);
- case 3:
- ALLOC_INVTLB_VEC(2);
- case 2:
- ALLOC_INVTLB_VEC(1);
- case 1:
- ALLOC_INVTLB_VEC(0);
- break;
- }
-
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 5a98aa27218..5cdff035774 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -21,7 +21,7 @@
#include <asm/cpu.h>
#include <asm/stackprotector.h>
-DEFINE_PER_CPU(int, cpu_number);
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c1a310fb830..7c5a8c314c0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -106,17 +106,17 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
+DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
-DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5e57e113b72..613cd83e8c0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -12,6 +12,7 @@
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
+#include <linux/debugfs.h>
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
= { &init_mm, 0, };
@@ -27,33 +28,14 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
*
* More scalable flush, from Andi Kleen
*
- * To avoid global state use 8 different call vectors.
- * Each CPU uses a specific vector to trigger flushes on other
- * CPUs. Depending on the received vector the target CPUs look into
- * the right array slot for the flush data.
- *
- * With more than 8 CPUs they are hashed to the 8 available
- * vectors. The limited global vector space forces us to this right now.
- * In future when interrupts are split into per CPU domains this could be
- * fixed, at the cost of triggering multiple IPIs in some cases.
+ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
-union smp_flush_state {
- struct {
- struct mm_struct *flush_mm;
- unsigned long flush_va;
- raw_spinlock_t tlbstate_lock;
- DECLARE_BITMAP(flush_cpumask, NR_CPUS);
- };
- char pad[INTERNODE_CACHE_BYTES];
-} ____cacheline_internodealigned_in_smp;
-
-/* State is put into the per CPU data section, but padded
- to a full cache line because other CPUs can access it and we don't
- want false sharing in the per cpu data segment. */
-static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
-
-static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
+struct flush_tlb_info {
+ struct mm_struct *flush_mm;
+ unsigned long flush_start;
+ unsigned long flush_end;
+};
/*
* We cannot call mmdrop() because we are in interrupt context,
@@ -72,28 +54,25 @@ void leave_mm(int cpu)
EXPORT_SYMBOL_GPL(leave_mm);
/*
- *
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
* 1) switch_mm() either 1a) or 1b)
* 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- * Stop ipi delivery for the old mm. This is not synchronized with
- * the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * for the wrong mm, and in the worst case we perform a superfluous
- * tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- * was in lazy tlb mode.
- * 1a3) update cpu active_mm
+ * 1a1) set cpu_tlbstate to TLBSTATE_OK
+ * Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
+ * if cpu0 was in lazy tlb mode.
+ * 1a2) update cpu active_mm
* Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
* Now the other cpus will send tlb flush ipis.
* 1a4) change cr3.
+ * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ * Stop ipi delivery for the old mm. This is not synchronized with
+ * the other cpus, but flush_tlb_func ignores flush ipis for the wrong
+ * mm, and in the worst case we perform a superfluous tlb flush.
* 1b) thread switch without mm change
- * cpu active_mm is correct, cpu0 already handles
- * flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
+ * cpu active_mm is correct, cpu0 already handles flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
* 1b2) test_and_set the cpu bit in cpu_vm_mask.
* Atomically set the bit [other cpus will start sending flush ipis],
* and test the bit.
@@ -106,174 +85,62 @@ EXPORT_SYMBOL_GPL(leave_mm);
* runs in kernel space, the cpu could load tlb entries for user space
* pages.
*
- * The good news is that cpu mmu_state is local to each cpu, no
+ * The good news is that cpu_tlbstate is local to each cpu, no
* write/read ordering problems.
*/
/*
- * TLB flush IPI:
- *
+ * TLB flush function:
* 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
* 2) Leave the mm if we are in the lazy tlb mode.
- *
- * Interrupts are disabled.
- */
-
-/*
- * FIXME: use of asmlinkage is not consistent. On x86_64 it's noop
- * but still used for documentation purpose but the usage is slightly
- * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
- * entry calls in with the first parameter in %eax. Maybe define
- * intrlinkage?
*/
-#ifdef CONFIG_X86_64
-asmlinkage
-#endif
-void smp_invalidate_interrupt(struct pt_regs *regs)
+static void flush_tlb_func(void *info)
{
- unsigned int cpu;
- unsigned int sender;
- union smp_flush_state *f;
-
- cpu = smp_processor_id();
- /*
- * orig_rax contains the negated interrupt vector.
- * Use that to determine where the sender put the data.
- */
- sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
- f = &flush_state[sender];
-
- if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
- goto out;
- /*
- * This was a BUG() but until someone can quote me the
- * line from the intel manual that guarantees an IPI to
- * multiple CPUs is retried _only_ on the erroring CPUs
- * its staying as a return
- *
- * BUG();
- */
-
- if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
- if (f->flush_va == TLB_FLUSH_ALL)
- local_flush_tlb();
- else
- __flush_tlb_one(f->flush_va);
- } else
- leave_mm(cpu);
- }
-out:
- ack_APIC_irq();
- smp_mb__before_clear_bit();
- cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
- smp_mb__after_clear_bit();
- inc_irq_stat(irq_tlb_count);
-}
+ struct flush_tlb_info *f = info;
-static void flush_tlb_others_ipi(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long va)
-{
- unsigned int sender;
- union smp_flush_state *f;
-
- /* Caller has disabled preemption */
- sender = this_cpu_read(tlb_vector_offset);
- f = &flush_state[sender];
-
- if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
- raw_spin_lock(&f->tlbstate_lock);
-
- f->flush_mm = mm;
- f->flush_va = va;
- if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
- /*
- * We have to send the IPI only to
- * CPUs affected.
- */
- apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
- INVALIDATE_TLB_VECTOR_START + sender);
-
- while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
- cpu_relax();
- }
+ if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ return;
+
+ if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+ if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
+ local_flush_tlb();
+ else if (!f->flush_end)
+ __flush_tlb_single(f->flush_start);
+ else {
+ unsigned long addr;
+ addr = f->flush_start;
+ while (addr < f->flush_end) {
+ __flush_tlb_single(addr);
+ addr += PAGE_SIZE;
+ }
+ }
+ } else
+ leave_mm(smp_processor_id());
- f->flush_mm = NULL;
- f->flush_va = 0;
- if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
- raw_spin_unlock(&f->tlbstate_lock);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long va)
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
+ struct flush_tlb_info info;
+ info.flush_mm = mm;
+ info.flush_start = start;
+ info.flush_end = end;
+
if (is_uv_system()) {
unsigned int cpu;
cpu = smp_processor_id();
- cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+ cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
if (cpumask)
- flush_tlb_others_ipi(cpumask, mm, va);
+ smp_call_function_many(cpumask, flush_tlb_func,
+ &info, 1);
return;
}
- flush_tlb_others_ipi(cpumask, mm, va);
+ smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
-static void __cpuinit calculate_tlb_offset(void)
-{
- int cpu, node, nr_node_vecs, idx = 0;
- /*
- * we are changing tlb_vector_offset for each CPU in runtime, but this
- * will not cause inconsistency, as the write is atomic under X86. we
- * might see more lock contentions in a short time, but after all CPU's
- * tlb_vector_offset are changed, everything should go normal
- *
- * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might
- * waste some vectors.
- **/
- if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
- nr_node_vecs = 1;
- else
- nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
-
- for_each_online_node(node) {
- int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
- nr_node_vecs;
- int cpu_offset = 0;
- for_each_cpu(cpu, cpumask_of_node(node)) {
- per_cpu(tlb_vector_offset, cpu) = node_offset +
- cpu_offset;
- cpu_offset++;
- cpu_offset = cpu_offset % nr_node_vecs;
- }
- idx++;
- }
-}
-
-static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- switch (action & 0xf) {
- case CPU_ONLINE:
- case CPU_DEAD:
- calculate_tlb_offset();
- }
- return NOTIFY_OK;
-}
-
-static int __cpuinit init_smp_flush(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(flush_state); i++)
- raw_spin_lock_init(&flush_state[i].tlbstate_lock);
-
- calculate_tlb_offset();
- hotcpu_notifier(tlb_cpuhp_notify, 0);
- return 0;
-}
-core_initcall(init_smp_flush);
-
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
@@ -282,27 +149,91 @@ void flush_tlb_current_task(void)
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+ flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
-void flush_tlb_mm(struct mm_struct *mm)
+/*
+ * Return the address of the first THP large page (or, with THP disabled,
+ * HUGETLB page) found in [start, end), or 0 if there is none.  Callers
+ * use this to fall back to a full flush instead of invlpg'ing a huge page.
+ */
+static inline unsigned long has_large_page(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long addr = ALIGN(start, HPAGE_SIZE);
+ for (; addr < end; addr += HPAGE_SIZE) {
+ pgd = pgd_offset(mm, addr);
+ if (likely(!pgd_none(*pgd))) {
+ pud = pud_offset(pgd, addr);
+ if (likely(!pud_none(*pud))) {
+ pmd = pmd_offset(pud, addr);
+ if (likely(!pmd_none(*pmd)))
+ if (pmd_large(*pmd))
+ return addr;
+ }
+ }
+ }
+ return 0;
+}
+
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long vmflag)
{
+ unsigned long addr;
+ unsigned act_entries, tlb_entries = 0;
+
preempt_disable();
+ if (current->active_mm != mm)
+ goto flush_all;
- if (current->active_mm == mm) {
- if (current->mm)
+ if (!current->mm) {
+ leave_mm(smp_processor_id());
+ goto flush_all;
+ }
+
+ if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
+ || vmflag == VM_HUGETLB) {
+ local_flush_tlb();
+ goto flush_all;
+ }
+
+ /* On modern CPUs, the last level TLB is used for both data and instructions */
+ if (vmflag & VM_EXEC)
+ tlb_entries = tlb_lli_4k[ENTRIES];
+ else
+ tlb_entries = tlb_lld_4k[ENTRIES];
+ /* Assume all TLB entries were occupied by this task */
+ act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+
+ /* tlb_flushall_shift is the balance point; see the commit log for details */
+ if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+ local_flush_tlb();
+ else {
+ if (has_large_page(mm, start, end)) {
local_flush_tlb();
- else
- leave_mm(smp_processor_id());
+ goto flush_all;
+ }
+ /* flush the range one page at a time with 'invlpg' */
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ __flush_tlb_single(addr);
+
+ if (cpumask_any_but(mm_cpumask(mm),
+ smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, start, end);
+ preempt_enable();
+ return;
}
- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
+flush_all:
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
struct mm_struct *mm = vma->vm_mm;
@@ -310,13 +241,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
if (current->active_mm == mm) {
if (current->mm)
- __flush_tlb_one(va);
+ __flush_tlb_one(start);
else
leave_mm(smp_processor_id());
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, va);
+ flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
preempt_enable();
}
@@ -332,3 +263,83 @@ void flush_tlb_all(void)
{
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
+
+static void do_kernel_range_flush(void *info)
+{
+ struct flush_tlb_info *f = info;
+ unsigned long addr;
+
+ /* flush the range one page at a time with 'invlpg' */
+ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+ __flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned act_entries;
+ struct flush_tlb_info info;
+
+ /* On modern CPUs, the last level TLB is used for both data and instructions */
+ act_entries = tlb_lld_4k[ENTRIES];
+
+ /* Use the same balance heuristic as a user-space task's flush; a bit conservative */
+ if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+ (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+ on_each_cpu(do_flush_tlb_all, NULL, 1);
+ else {
+ info.flush_start = start;
+ info.flush_end = end;
+ on_each_cpu(do_kernel_range_flush, &info, 1);
+ }
+}
+
+#ifdef CONFIG_DEBUG_TLBFLUSH
+static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%hd\n", tlb_flushall_shift);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t tlbflush_write_file(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ ssize_t len;
+ s8 shift;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtos8(buf, 0, &shift))
+ return -EINVAL;
+
+ if (shift > 64)
+ return -EINVAL;
+
+ tlb_flushall_shift = shift;
+ return count;
+}
+
+static const struct file_operations fops_tlbflush = {
+ .read = tlbflush_read_file,
+ .write = tlbflush_write_file,
+ .llseek = default_llseek,
+};
+
+static int __cpuinit create_tlb_flushall_shift(void)
+{
+ if (cpu_has_invlpg) {
+ debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
+ arch_debugfs_dir, NULL, &fops_tlbflush);
+ }
+ return 0;
+}
+late_initcall(create_tlb_flushall_shift);
+#endif
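
The removed INVALIDATE_TLB_VECTOR machinery (per-sender vectors, flush_state[] slots, spinlocks, the vector-offset notifier) is replaced above by the generic SMP function-call IPI: native_flush_tlb_others() packs the range into an on-stack flush_tlb_info and lets smp_call_function_many() run flush_tlb_func() on the target CPUs. A minimal sketch of that pattern, with made-up names rather than the patch's exact code:

#include <linux/smp.h>
#include <linux/cpumask.h>

struct demo_flush_info {			/* hypothetical payload */
	unsigned long start;
	unsigned long end;
};

/* Runs on each target CPU, in interrupt context, via the
 * CALL_FUNCTION_VECTOR IPI. */
static void demo_flush_func(void *info)
{
	struct demo_flush_info *f = info;

	/* invalidate [f->start, f->end) page by page, or reload cr3
	 * for a full flush -- as flush_tlb_func() does above. */
	(void)f;
}

static void demo_flush_others(const struct cpumask *cpus,
			      unsigned long start, unsigned long end)
{
	struct demo_flush_info info = { .start = start, .end = end };

	/* wait=1: return only after every target CPU has run the
	 * callback, preserving the synchronous flush semantics the
	 * dedicated vectors used to provide. */
	smp_call_function_many(cpus, demo_flush_func, &info, 1);
}
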
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 71b5d5a07d7..b8b3a37c80c 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1055,8 +1055,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
* done. The returned pointer is valid till preemption is re-enabled.
*/
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long va,
- unsigned int cpu)
+ struct mm_struct *mm, unsigned long start,
+ unsigned end, unsigned int cpu)
{
int locals = 0;
int remotes = 0;
@@ -1113,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
record_send_statistics(stat, locals, hubs, remotes, bau_desc);
- bau_desc->payload.address = va;
+ bau_desc->payload.address = start;
bau_desc->payload.sending_cpu = cpu;
/*
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 27336dfcda8..b65a76133f4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1256,7 +1256,8 @@ static void xen_flush_tlb_single(unsigned long addr)
}
static void xen_flush_tlb_others(const struct cpumask *cpus,
- struct mm_struct *mm, unsigned long va)
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
struct {
struct mmuext_op op;
@@ -1268,7 +1269,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
- trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+ trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1281,11 +1282,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
- if (va == TLB_FLUSH_ALL) {
- args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- } else {
+ args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+ if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
- args->op.arg1.linear_addr = va;
+ args->op.arg1.linear_addr = start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);