Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c | 352
1 file changed, 254 insertions(+), 98 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b68bda3093..ef1b93f18ed 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -8,6 +8,7 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/kprobes.h>
 #include <linux/kgdb.h>
 #include <linux/smp.h>
 #include <linux/io.h>
@@ -15,18 +16,22 @@
 #include <asm/stackprotector.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
+#include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
+#include <asm/debugreg.h>
 #include <asm/sections.h>
+#include <asm/vsyscall.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
@@ -34,6 +39,8 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
+#include <asm/microcode.h>
+#include <asm/microcode_intel.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -58,7 +65,7 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	cpu_detect_cache_sizes(c);
@@ -75,13 +82,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 };
 
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -141,6 +148,8 @@ static int __init x86_xsave_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+	setup_clear_cpu_cap(X86_FEATURE_AVX);
+	setup_clear_cpu_cap(X86_FEATURE_AVX2);
 	return 1;
 }
 __setup("noxsave", x86_xsave_setup);
@@ -153,8 +162,8 @@ static int __init x86_xsaveopt_setup(char *s)
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
 
 static int __init cachesize_setup(char *str)
 {
@@ -208,12 +217,12 @@ static inline int flag_is_changeable_p(u32 flag)
 }
 
 /* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -244,15 +253,47 @@ static inline int flag_is_changeable_p(u32 flag)
 {
 	return 1;
 }
-/* Probe for the CPUID instruction */
-static inline int have_cpuid_p(void)
+static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
+}
+#endif
+
+static __init int setup_disable_smep(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
-static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+__setup("nosmep", setup_disable_smep);
+
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
+}
+
+static __init int setup_disable_smap(char *arg)
 {
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
+	return 1;
 }
+__setup("nosmap", setup_disable_smap);
+
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+{
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
+		set_in_cr4(X86_CR4_SMAP);
+#else
+		clear_in_cr4(X86_CR4_SMAP);
 #endif
+	}
+}
 
 /*
  * Some CPU features depend on higher CPUID levels, which may not always
@@ -264,7 +305,7 @@ struct cpuid_dependent_feature {
 	u32 level;
 };
 
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
 	{ X86_FEATURE_DCA,		0x00000009 },
@@ -272,7 +313,7 @@ cpuid_dependent_features[] = {
 	{ 0, 0 }
 };
 
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
 
@@ -310,9 +351,10 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
  */
 
 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
-	const struct cpu_model_info *info;
+#ifdef CONFIG_X86_32
+	const struct legacy_cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
@@ -320,18 +362,19 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 	if (!this_cpu)
 		return NULL;
 
-	info = this_cpu->c_models;
+	info = this_cpu->legacy_models;
 
-	while (info && info->family) {
+	while (info->family) {
 		if (info->family == c->x86)
 			return info->model_names[c->x86_model];
 		info++;
 	}
+#endif
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -360,9 +403,9 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -391,7 +434,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	}
 }
 
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
@@ -416,8 +459,8 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 #else
 	/* do processor-specific cache resizing */
-	if (this_cpu->c_size_cache)
-		l2size = this_cpu->c_size_cache(c, l2size);
+	if (this_cpu->legacy_cache_size)
+		l2size = this_cpu->legacy_cache_size(c, l2size);
 
 	/* Allow user to override all this if necessary. */
 	if (cachesize_override != -1)
@@ -430,7 +473,37 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 }
 
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+u16 __read_mostly tlb_lli_4k[NR_INFO];
+u16 __read_mostly tlb_lli_2m[NR_INFO];
+u16 __read_mostly tlb_lli_4m[NR_INFO];
+u16 __read_mostly tlb_lld_4k[NR_INFO];
+u16 __read_mostly tlb_lld_2m[NR_INFO];
+u16 __read_mostly tlb_lld_4m[NR_INFO];
+u16 __read_mostly tlb_lld_1g[NR_INFO];
+
+/*
+ * tlb_flushall_shift shows the balance point in replacing cr3 write
+ * with multiple 'invlpg'. It will do this replacement when
+ *   flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
+ * If tlb_flushall_shift is -1, means the replacement will be disabled.
+ */
+s8  __read_mostly tlb_flushall_shift = -1;
+
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
+{
+	if (this_cpu->c_detect_tlb)
+		this_cpu->c_detect_tlb(c);
+
+	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
+		"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n"
+		"tlb_flushall_shift: %d\n",
+		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
+		tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
+		tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
+		tlb_lld_1g[ENTRIES], tlb_flushall_shift);
+}
+
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
@@ -458,13 +531,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	if (smp_num_siblings <= 1)
 		goto out;
 
-	if (smp_num_siblings > nr_cpu_ids) {
-		pr_warning("CPU: Unsupported number of siblings %d",
-			   smp_num_siblings);
-		smp_num_siblings = 1;
-		return;
-	}
-
 	index_msb = get_count_order(smp_num_siblings);
 
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -488,7 +554,7 @@ out:
 #endif
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -515,7 +581,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	this_cpu = &default_cpu;
 }
 
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -545,7 +611,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	}
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
@@ -565,8 +631,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 
-		if (eax > 0)
-			c->x86_capability[9] = ebx;
+		c->x86_capability[9] = ebx;
 	}
 
 	/* AMD-defined flags: level 0x80000001 */
@@ -597,7 +662,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	init_scattered_cpuid_features(c);
 }
 
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	int i;
@@ -656,18 +721,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		return;
 
 	cpu_detect(c);
-
 	get_cpu_vendor(c);
-
 	get_cpu_cap(c);
+	fpu_detect(c);
 
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-#ifdef CONFIG_SMP
 	c->cpu_index = 0;
-#endif
 	filter_cpuid_features(c, false);
+
+	if (this_cpu->c_bsp_init)
+		this_cpu->c_bsp_init(c);
+
+	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 }
 
 void __init early_cpu_init(void)
@@ -675,7 +742,7 @@ void __init early_cpu_init(void)
 	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
 	printk(KERN_INFO "KERNEL supported cpus:\n");
 #endif
 
@@ -687,7 +754,7 @@ void __init early_cpu_init(void)
 		cpu_devs[count] = cpudev;
 		count++;
 
-#ifdef PROCESSOR_SELECT
+#ifdef CONFIG_PROCESSOR_SELECT
 		{
 			unsigned int j;
 
@@ -712,7 +779,7 @@
  * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
  */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -721,7 +788,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;
 
@@ -747,10 +814,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->apicid = c->initial_apicid;
 # endif
 #endif
-
-#ifdef CONFIG_X86_HT
 		c->phys_proc_id = c->initial_apicid;
-#endif
 	}
 
 	get_model_name(c); /* Default name */
@@ -761,7 +825,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -817,6 +881,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."
@@ -842,6 +910,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #endif
 
 	init_hypervisor(c);
+	x86_init_rdrand(c);
 
 	/*
 	 * Clear/Set all flags overriden by options, need do it
@@ -862,6 +931,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		/* AND the already accumulated flags with these */
 		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+
+		/* OR, i.e. replicate the bug flags */
+		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
+			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
 	}
 
 	/* Init Machine Check Exception if available. */
@@ -869,7 +942,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	select_idle_routine(c);
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
 }
@@ -882,22 +955,54 @@ static void vgetcpu_set_mode(void)
 	else
 		vgetcpu_mode = VGETCPU_LSL;
 }
+
+/* May not be __init: called during resume */
+static void syscall32_cpu_init(void)
+{
+	/* Load these always in case some future AMD CPU supports
+	   SYSENTER from compat mode too. */
+	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+void enable_sep_cpu(void)
+{
+	int cpu = get_cpu();
+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+	put_cpu();
+}
 #endif
 
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
-	init_c1e_mask();
+	init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
 #else
 	vgetcpu_set_mode();
 #endif
-	init_hw_perf_events();
+	cpu_detect_tlb(&boot_cpu_data);
 }
 
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
@@ -912,14 +1017,14 @@ struct msr_range {
 	unsigned	max;
 };
 
-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
 	{ 0xc0011000, 0xc001103b},
 };
 
-static void __cpuinit print_cpu_msr(void)
+static void __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
@@ -931,14 +1036,14 @@ static void __cpuinit print_cpu_msr(void)
 		index_max = msr_range_array[i].max;
 
 		for (index = index_min; index < index_max; index++) {
-			if (rdmsrl_amd_safe(index, &val))
+			if (rdmsrl_safe(index, &val))
 				continue;
 			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
 	}
 }
 
-static int show_msr __cpuinitdata;
+static int show_msr;
 
 static __init int setup_show_msr(char *arg)
 {
@@ -954,12 +1059,13 @@ __setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
-	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
+	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
 	return 1;
 }
 __setup("noclflush", setup_noclflush);
 
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
 {
 	const char *vendor = NULL;
 
@@ -974,22 +1080,24 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT "%s ", vendor);
 
 	if (c->x86_model_id[0])
-		printk(KERN_CONT "%s", c->x86_model_id);
+		printk(KERN_CONT "%s", strim(c->x86_model_id));
 	else
 		printk(KERN_CONT "%d86", c->x86);
 
+	printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);
+
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
 	else
-		printk(KERN_CONT "\n");
+		printk(KERN_CONT ")\n");
 
-#ifdef CONFIG_SMP
+	print_cpu_msr(c);
+}
+
+void print_cpu_msr(struct cpuinfo_x86 *c)
+{
 	if (c->cpu_index < show_msr)
-		print_cpu_msr();
-#else
-	if (show_msr)
-		print_cpu_msr();
-#endif
+		__print_cpu_msr();
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -1005,11 +1113,17 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
+				    (unsigned long) debug_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
-		     irq_stack_union) __aligned(PAGE_SIZE);
+		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
 
 /*
  * The following four percpu variables are hot.  Align current_task to
@@ -1019,14 +1133,15 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 /*
  * Special IST stacks which the CPU switches to when it calls
@@ -1060,35 +1175,57 @@ void syscall_init(void)
 
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 
-unsigned long kernel_eflags;
-
 /*
  * Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
+static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
+DEFINE_PER_CPU(int, debug_stack_usage);
+
+int is_debug_stack(unsigned long addr)
+{
+	return __get_cpu_var(debug_stack_usage) ||
+		(addr <= __get_cpu_var(debug_stack_addr) &&
+		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+}
+NOKPROBE_SYMBOL(is_debug_stack);
+
+DEFINE_PER_CPU(u32, debug_idt_ctr);
+
+void debug_stack_set_zero(void)
+{
+	this_cpu_inc(debug_idt_ctr);
+	load_current_idt();
+}
+NOKPROBE_SYMBOL(debug_stack_set_zero);
+
+void debug_stack_reset(void)
+{
+	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
+		return;
+	if (this_cpu_dec_return(debug_idt_ctr) == 0)
+		load_current_idt();
+}
+NOKPROBE_SYMBOL(debug_stack_reset);
+
 #else	/* CONFIG_X86_64 */
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
+DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
 
-/* Make sure %fs and %gs are initialized properly in idle threads */
-struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
-{
-	memset(regs, 0, sizeof(struct pt_regs));
-	regs->fs = __KERNEL_PERCPU;
-	regs->gs = __KERNEL_STACK_CANARY;
-
-	return regs;
-}
 #endif	/* CONFIG_X86_64 */
 
 /*
@@ -1130,7 +1267,7 @@
  */
 #ifdef CONFIG_X86_64
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	struct orig_ist *oist;
 	struct task_struct *me;
@@ -1139,12 +1276,18 @@ void __cpuinit cpu_init(void)
 	int cpu;
 	int i;
 
+	/*
+	 * Load microcode on this cpu if a valid microcode is available.
+	 * This is early microcode loading procedure.
+	 */
+	load_ucode_ap();
+
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
-	if (cpu != 0 && percpu_read(numa_node) == 0 &&
+	if (this_cpu_read(numa_node) == 0 &&
 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
 		set_numa_node(early_cpu_to_node(cpu));
 #endif
@@ -1166,7 +1309,7 @@ void __cpuinit cpu_init(void)
 	switch_to_new_gdt(cpu);
 	loadsegment(fs, 0);
 
-	load_idt((const struct desc_ptr *)&idt_descr);
+	load_current_idt();
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
 	syscall_init();
@@ -1176,8 +1319,7 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	x86_configure_nx();
-	if (cpu != 0)
-		enable_x2apic();
+	enable_x2apic();
 
 	/*
 	 * set up and load the per-CPU TSS
@@ -1189,6 +1331,8 @@ void __cpuinit cpu_init(void)
 			estacks += exception_stack_sizes[v];
 			oist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
+			if (v == DEBUG_STACK-1)
+				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
 		}
 	}
 
@@ -1215,9 +1359,6 @@ void __cpuinit cpu_init(void)
 	dbg_restore_debug_regs();
 
 	fpu_init();
-	xsave_init();
-
-	raw_local_save_flags(kernel_eflags);
 
 	if (is_uv_system())
 		uv_cpu_init();
@@ -1225,13 +1366,15 @@
 
 #else
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
+	show_ucode_info_early();
+
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
 		for (;;)
@@ -1243,7 +1386,7 @@ void __cpuinit cpu_init(void)
 	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
 		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
-	load_idt(&idt_descr);
+	load_current_idt();
 	switch_to_new_gdt(cpu);
 
 	/*
@@ -1270,6 +1413,19 @@ void __cpuinit cpu_init(void)
 	dbg_restore_debug_regs();
 
 	fpu_init();
-	xsave_init();
 }
 #endif
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+void warn_pre_alternatives(void)
+{
+	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
+}
+EXPORT_SYMBOL_GPL(warn_pre_alternatives);
+#endif
+
+inline bool __static_cpu_has_safe(u16 bit)
+{
+	return boot_cpu_has(bit);
+}
+EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
