Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	4597
1 file changed, 3031 insertions, 1566 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cdac9e592aa..ef432f891d3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -26,6 +26,7 @@  #include "tss.h"  #include "kvm_cache_regs.h"  #include "x86.h" +#include "cpuid.h"  #include <linux/clocksource.h>  #include <linux/interrupt.h> @@ -43,6 +44,10 @@  #include <linux/slab.h>  #include <linux/perf_event.h>  #include <linux/uaccess.h> +#include <linux/hash.h> +#include <linux/pci.h> +#include <linux/timekeeper_internal.h> +#include <linux/pvclock_gtod.h>  #include <trace/events/kvm.h>  #define CREATE_TRACE_POINTS @@ -54,49 +59,54 @@  #include <asm/mtrr.h>  #include <asm/mce.h>  #include <asm/i387.h> +#include <asm/fpu-internal.h> /* Ugh! */  #include <asm/xcr.h>  #include <asm/pvclock.h>  #include <asm/div64.h>  #define MAX_IO_MSRS 256 -#define CR0_RESERVED_BITS						\ -	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ -			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ -			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) -#define CR4_RESERVED_BITS						\ -	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ -			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\ -			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\ -			  | X86_CR4_OSXSAVE \ -			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) - -#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) -  #define KVM_MAX_MCE_BANKS 32  #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) +#define emul_to_vcpu(ctxt) \ +	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) +  /* EFER defaults:   * - enable syscall per default because its emulated by KVM   * - enable LME and LMA per default on 64 bit KVM   */  #ifdef CONFIG_X86_64 -static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL; +static +u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));  #else -static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; +static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);  #endif  #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU  static void update_cr8_intercept(struct kvm_vcpu *vcpu); -static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, -				    struct kvm_cpuid_entry2 __user *entries); +static void process_nmi(struct kvm_vcpu *vcpu);  struct kvm_x86_ops *kvm_x86_ops;  EXPORT_SYMBOL_GPL(kvm_x86_ops); -int ignore_msrs = 0; -module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); +static bool ignore_msrs = 0; +module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); + +unsigned int min_timer_period_us = 500; +module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); + +bool kvm_has_tsc_control; +EXPORT_SYMBOL_GPL(kvm_has_tsc_control); +u32  kvm_max_guest_tsc_khz; +EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); + +/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ +static u32 tsc_tolerance_ppm = 250; +module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); + +static bool backwards_tsc_observed = false;  #define KVM_NR_SHARED_MSRS 16 @@ -115,7 +125,7 @@ struct kvm_shared_msrs {  };  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; -static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); +static struct kvm_shared_msrs __percpu *shared_msrs;  struct kvm_stats_debugfs_item debugfs_entries[] = {  	{ "pf_fixed", VCPU_STAT(pf_fixed) }, @@ -155,9 +165,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {  u64 __read_mostly host_xcr0; 
-static inline u32 bit(int bitno) +static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); + +static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)  { -	return 1 << (bitno & 31); +	int i; +	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) +		vcpu->arch.apf.gfns[i] = ~0;  }  static void kvm_on_user_return(struct user_return_notifier *urn) @@ -180,10 +194,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)  static void shared_msr_update(unsigned slot, u32 msr)  { -	struct kvm_shared_msrs *smsr;  	u64 value; +	unsigned int cpu = smp_processor_id(); +	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); -	smsr = &__get_cpu_var(shared_msrs);  	/* only read, and nobody should modify it at this time,  	 * so don't need lock */  	if (slot >= shared_msrs_global.nr) { @@ -215,7 +229,8 @@ static void kvm_shared_msr_cpu_online(void)  void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)  { -	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); +	unsigned int cpu = smp_processor_id(); +	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);  	if (((value ^ smsr->values[slot].curr) & mask) == 0)  		return; @@ -231,7 +246,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);  static void drop_user_return_notifiers(void *ignore)  { -	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); +	unsigned int cpu = smp_processor_id(); +	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);  	if (smsr->registered)  		kvm_on_user_return(&smsr->urn); @@ -239,23 +255,40 @@ static void drop_user_return_notifiers(void *ignore)  u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)  { -	if (irqchip_in_kernel(vcpu->kvm)) -		return vcpu->arch.apic_base; -	else -		return vcpu->arch.apic_base; +	return vcpu->arch.apic_base;  }  EXPORT_SYMBOL_GPL(kvm_get_apic_base); -void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) -{ -	/* TODO: reserve bits check */ -	if (irqchip_in_kernel(vcpu->kvm)) -		kvm_lapic_set_base(vcpu, data); -	else -		vcpu->arch.apic_base = data; +int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ +	u64 old_state = vcpu->arch.apic_base & +		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); +	u64 new_state = msr_info->data & +		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); +	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | +		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); + +	if (!msr_info->host_initiated && +	    ((msr_info->data & reserved_bits) != 0 || +	     new_state == X2APIC_ENABLE || +	     (new_state == MSR_IA32_APICBASE_ENABLE && +	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || +	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && +	      old_state == 0))) +		return 1; + +	kvm_lapic_set_base(vcpu, msr_info->data); +	return 0;  }  EXPORT_SYMBOL_GPL(kvm_set_apic_base); +asmlinkage __visible void kvm_spurious_fault(void) +{ +	/* Fault while not rebooting.  We want the trace. 
*/ +	BUG(); +} +EXPORT_SYMBOL_GPL(kvm_spurious_fault); +  #define EXCPT_BENIGN		0  #define EXCPT_CONTRIBUTORY	1  #define EXCPT_PF		2 @@ -331,29 +364,35 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)  }  EXPORT_SYMBOL_GPL(kvm_requeue_exception); -void kvm_inject_page_fault(struct kvm_vcpu *vcpu) +void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)  { -	unsigned error_code = vcpu->arch.fault.error_code; +	if (err) +		kvm_inject_gp(vcpu, 0); +	else +		kvm_x86_ops->skip_emulated_instruction(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); +void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) +{  	++vcpu->stat.pf_guest; -	vcpu->arch.cr2 = vcpu->arch.fault.address; -	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); +	vcpu->arch.cr2 = fault->address; +	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);  } +EXPORT_SYMBOL_GPL(kvm_inject_page_fault); -void kvm_propagate_fault(struct kvm_vcpu *vcpu) +void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)  { -	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested) -		vcpu->arch.nested_mmu.inject_page_fault(vcpu); +	if (mmu_is_nested(vcpu) && !fault->nested_page_fault) +		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);  	else -		vcpu->arch.mmu.inject_page_fault(vcpu); - -	vcpu->arch.fault.nested = false; +		vcpu->arch.mmu.inject_page_fault(vcpu, fault);  }  void kvm_inject_nmi(struct kvm_vcpu *vcpu)  { -	kvm_make_request(KVM_REQ_EVENT, vcpu); -	vcpu->arch.nmi_pending = 1; +	atomic_inc(&vcpu->arch.nmi_queued); +	kvm_make_request(KVM_REQ_NMI, vcpu);  }  EXPORT_SYMBOL_GPL(kvm_inject_nmi); @@ -465,8 +504,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)  		      (unsigned long *)&vcpu->arch.regs_avail))  		return true; -	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT; -	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1); +	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; +	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);  	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),  				       PFERR_USER_MASK | PFERR_WRITE_MASK);  	if (r < 0) @@ -511,12 +550,20 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)  		} else  #endif  		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, -						 vcpu->arch.cr3)) +						 kvm_read_cr3(vcpu)))  			return 1;  	} +	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) +		return 1; +  	kvm_x86_ops->set_cr0(vcpu, cr0); +	if ((cr0 ^ old_cr0) & X86_CR0_PG) { +		kvm_clear_async_pf_completion_queue(vcpu); +		kvm_async_pf_hash_reset(vcpu); +	} +  	if ((cr0 ^ old_cr0) & update_bits)  		kvm_mmu_reset_context(vcpu);  	return 0; @@ -529,30 +576,63 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)  }  EXPORT_SYMBOL_GPL(kvm_lmsw); +static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +{ +	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && +			!vcpu->guest_xcr0_loaded) { +		/* kvm_set_xcr() also depends on this */ +		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); +		vcpu->guest_xcr0_loaded = 1; +	} +} + +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +{ +	if (vcpu->guest_xcr0_loaded) { +		if (vcpu->arch.xcr0 != host_xcr0) +			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); +		vcpu->guest_xcr0_loaded = 0; +	} +} +  int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)  { -	u64 xcr0; +	u64 xcr0 = xcr; +	u64 old_xcr0 = vcpu->arch.xcr0; +	u64 valid_bits;  	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */  	if (index != XCR_XFEATURE_ENABLED_MASK)  		return 1; -	
xcr0 = xcr; -	if (kvm_x86_ops->get_cpl(vcpu) != 0) -		return 1;  	if (!(xcr0 & XSTATE_FP))  		return 1;  	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))  		return 1; -	if (xcr0 & ~host_xcr0) + +	/* +	 * Do not allow the guest to set bits that we do not support +	 * saving.  However, xcr0 bit 0 is always set, even if the +	 * emulated CPU does not support XSAVE (see fx_init). +	 */ +	valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; +	if (xcr0 & ~valid_bits) +		return 1; + +	if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))  		return 1; + +	kvm_put_guest_xcr0(vcpu);  	vcpu->arch.xcr0 = xcr0; -	vcpu->guest_xcr0_loaded = 0; + +	if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) +		kvm_update_cpuid(vcpu);  	return 0;  }  int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)  { -	if (__kvm_set_xcr(vcpu, index, xcr)) { +	if (kvm_x86_ops->get_cpl(vcpu) != 0 || +	    __kvm_set_xcr(vcpu, index, xcr)) {  		kvm_inject_gp(vcpu, 0);  		return 1;  	} @@ -560,59 +640,56 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)  }  EXPORT_SYMBOL_GPL(kvm_set_xcr); -static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) -{ -	struct kvm_cpuid_entry2 *best; - -	best = kvm_find_cpuid_entry(vcpu, 1, 0); -	return best && (best->ecx & bit(X86_FEATURE_XSAVE)); -} - -static void update_cpuid(struct kvm_vcpu *vcpu) -{ -	struct kvm_cpuid_entry2 *best; - -	best = kvm_find_cpuid_entry(vcpu, 1, 0); -	if (!best) -		return; - -	/* Update OSXSAVE bit */ -	if (cpu_has_xsave && best->function == 0x1) { -		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE)); -		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) -			best->ecx |= bit(X86_FEATURE_OSXSAVE); -	} -} -  int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)  {  	unsigned long old_cr4 = kvm_read_cr4(vcpu); -	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; - +	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | +				   X86_CR4_PAE | X86_CR4_SMEP;  	if (cr4 & CR4_RESERVED_BITS)  		return 1;  	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))  		return 1; +	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) +		return 1; + +	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) +		return 1; + +	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) +		return 1; +  	if (is_long_mode(vcpu)) {  		if (!(cr4 & X86_CR4_PAE))  			return 1;  	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)  		   && ((cr4 ^ old_cr4) & pdptr_bits) -		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) +		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, +				   kvm_read_cr3(vcpu)))  		return 1; -	if (cr4 & X86_CR4_VMXE) -		return 1; +	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { +		if (!guest_cpuid_has_pcid(vcpu)) +			return 1; + +		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ +		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) +			return 1; +	} -	kvm_x86_ops->set_cr4(vcpu, cr4); +	if (kvm_x86_ops->set_cr4(vcpu, cr4)) +		return 1; -	if ((cr4 ^ old_cr4) & pdptr_bits) +	if (((cr4 ^ old_cr4) & pdptr_bits) || +	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))  		kvm_mmu_reset_context(vcpu); +	if ((cr4 ^ old_cr4) & X86_CR4_SMAP) +		update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); +  	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) -		update_cpuid(vcpu); +		kvm_update_cpuid(vcpu);  	return 0;  } @@ -620,7 +697,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)  { -	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) { +	
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {  		kvm_mmu_sync_roots(vcpu);  		kvm_mmu_flush_tlb(vcpu);  		return 0; @@ -629,38 +706,18 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)  	if (is_long_mode(vcpu)) {  		if (cr3 & CR3_L_MODE_RESERVED_BITS)  			return 1; -	} else { -		if (is_pae(vcpu)) { -			if (cr3 & CR3_PAE_RESERVED_BITS) -				return 1; -			if (is_paging(vcpu) && -			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) -				return 1; -		} -		/* -		 * We don't check reserved bits in nonpae mode, because -		 * this isn't enforced, and VMware depends on this. -		 */ -	} - -	/* -	 * Does the new cr3 value map to physical memory? (Note, we -	 * catch an invalid cr3 even in real-mode, because it would -	 * cause trouble later on when we turn on paging anyway.) -	 * -	 * A real CPU would silently accept an invalid cr3 and would -	 * attempt to use it - with largely undefined (and often hard -	 * to debug) behavior on the guest side. -	 */ -	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) +	} else if (is_pae(vcpu) && is_paging(vcpu) && +		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))  		return 1; +  	vcpu->arch.cr3 = cr3; -	vcpu->arch.mmu.new_cr3(vcpu); +	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); +	kvm_mmu_new_cr3(vcpu);  	return 0;  }  EXPORT_SYMBOL_GPL(kvm_set_cr3); -int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) +int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)  {  	if (cr8 & CR8_RESERVED_BITS)  		return 1; @@ -670,12 +727,6 @@ int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)  		vcpu->arch.cr8 = cr8;  	return 0;  } - -void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) -{ -	if (__kvm_set_cr8(vcpu, cr8)) -		kvm_inject_gp(vcpu, 0); -}  EXPORT_SYMBOL_GPL(kvm_set_cr8);  unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) @@ -687,6 +738,26 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)  }  EXPORT_SYMBOL_GPL(kvm_get_cr8); +static void kvm_update_dr6(struct kvm_vcpu *vcpu) +{ +	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) +		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); +} + +static void kvm_update_dr7(struct kvm_vcpu *vcpu) +{ +	unsigned long dr7; + +	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) +		dr7 = vcpu->arch.guest_debug_dr7; +	else +		dr7 = vcpu->arch.dr7; +	kvm_x86_ops->set_dr7(vcpu, dr7); +	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; +	if (dr7 & DR7_BP_EN_MASK) +		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; +} +  static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)  {  	switch (dr) { @@ -703,6 +774,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)  		if (val & 0xffffffff00000000ULL)  			return -1; /* #GP */  		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; +		kvm_update_dr6(vcpu);  		break;  	case 5:  		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) @@ -712,10 +784,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)  		if (val & 0xffffffff00000000ULL)  			return -1; /* #GP */  		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; -		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { -			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7); -			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK); -		} +		kvm_update_dr7(vcpu);  		break;  	} @@ -747,7 +816,10 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)  			return 1;  		/* fall through */  	case 6: -		*val = vcpu->arch.dr6; +		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) +			*val = vcpu->arch.dr6; +		else +		
	*val = kvm_x86_ops->get_dr6(vcpu);  		break;  	case 5:  		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) @@ -771,6 +843,21 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)  }  EXPORT_SYMBOL_GPL(kvm_get_dr); +bool kvm_rdpmc(struct kvm_vcpu *vcpu) +{ +	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); +	u64 data; +	int err; + +	err = kvm_pmu_read_pmc(vcpu, ecx, &data); +	if (err) +		return err; +	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); +	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); +	return err; +} +EXPORT_SYMBOL_GPL(kvm_rdpmc); +  /*   * List of msr numbers which we expose to userspace through KVM_GET_MSRS   * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. @@ -780,45 +867,44 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);   * kvm-specific. Those are put in the beginning of the list.   */ -#define KVM_SAVE_MSRS_BEGIN	7 +#define KVM_SAVE_MSRS_BEGIN	12  static u32 msrs_to_save[] = {  	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,  	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,  	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, -	HV_X64_MSR_APIC_ASSIST_PAGE, +	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, +	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, +	MSR_KVM_PV_EOI_EN,  	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,  	MSR_STAR,  #ifdef CONFIG_X86_64  	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,  #endif -	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA +	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, +	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS  };  static unsigned num_msrs_to_save; -static u32 emulated_msrs[] = { +static const u32 emulated_msrs[] = { +	MSR_IA32_TSC_ADJUST, +	MSR_IA32_TSCDEADLINE,  	MSR_IA32_MISC_ENABLE,  	MSR_IA32_MCG_STATUS,  	MSR_IA32_MCG_CTL,  }; -static int set_efer(struct kvm_vcpu *vcpu, u64 efer) +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)  { -	u64 old_efer = vcpu->arch.efer; -  	if (efer & efer_reserved_bits) -		return 1; - -	if (is_paging(vcpu) -	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) -		return 1; +		return false;  	if (efer & EFER_FFXSR) {  		struct kvm_cpuid_entry2 *feat;  		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);  		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) -			return 1; +			return false;  	}  	if (efer & EFER_SVME) { @@ -826,17 +912,29 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)  		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);  		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) -			return 1; +			return false;  	} +	return true; +} +EXPORT_SYMBOL_GPL(kvm_valid_efer); + +static int set_efer(struct kvm_vcpu *vcpu, u64 efer) +{ +	u64 old_efer = vcpu->arch.efer; + +	if (!kvm_valid_efer(vcpu, efer)) +		return 1; + +	if (is_paging(vcpu) +	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) +		return 1; +  	efer &= ~EFER_LMA;  	efer |= vcpu->arch.efer & EFER_LMA;  	kvm_x86_ops->set_efer(vcpu, efer); -	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; -	kvm_mmu_reset_context(vcpu); -  	/* Update reserved bits */  	if ((efer ^ old_efer) & EFER_NX)  		kvm_mmu_reset_context(vcpu); @@ -856,9 +954,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);   * Returns 0 on success, non-0 otherwise.   * Assumes vcpu_load() was already called.   
*/ -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) +int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)  { -	return kvm_x86_ops->set_msr(vcpu, msr_index, data); +	return kvm_x86_ops->set_msr(vcpu, msr);  }  /* @@ -866,8 +964,62 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)   */  static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)  { -	return kvm_set_msr(vcpu, index, *data); +	struct msr_data msr; + +	msr.data = *data; +	msr.index = index; +	msr.host_initiated = true; +	return kvm_set_msr(vcpu, &msr); +} + +#ifdef CONFIG_X86_64 +struct pvclock_gtod_data { +	seqcount_t	seq; + +	struct { /* extract of a clocksource struct */ +		int vclock_mode; +		cycle_t	cycle_last; +		cycle_t	mask; +		u32	mult; +		u32	shift; +	} clock; + +	/* open coded 'struct timespec' */ +	u64		monotonic_time_snsec; +	time_t		monotonic_time_sec; +}; + +static struct pvclock_gtod_data pvclock_gtod_data; + +static void update_pvclock_gtod(struct timekeeper *tk) +{ +	struct pvclock_gtod_data *vdata = &pvclock_gtod_data; + +	write_seqcount_begin(&vdata->seq); + +	/* copy pvclock gtod data */ +	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode; +	vdata->clock.cycle_last		= tk->clock->cycle_last; +	vdata->clock.mask		= tk->clock->mask; +	vdata->clock.mult		= tk->mult; +	vdata->clock.shift		= tk->shift; + +	vdata->monotonic_time_sec	= tk->xtime_sec +					+ tk->wall_to_monotonic.tv_sec; +	vdata->monotonic_time_snsec	= tk->xtime_nsec +					+ (tk->wall_to_monotonic.tv_nsec +						<< tk->shift); +	while (vdata->monotonic_time_snsec >= +					(((u64)NSEC_PER_SEC) << tk->shift)) { +		vdata->monotonic_time_snsec -= +					((u64)NSEC_PER_SEC) << tk->shift; +		vdata->monotonic_time_sec++; +	} + +	write_seqcount_end(&vdata->seq);  } +#endif +  static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)  { @@ -898,6 +1050,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)  	 */  	getboottime(&boot); +	if (kvm->arch.kvmclock_offset) { +		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); +		boot = timespec_sub(boot, ts); +	}  	wc.sec = boot.tv_sec;  	wc.nsec = boot.tv_nsec;  	wc.version = version; @@ -955,124 +1111,444 @@ static inline u64 get_kernel_ns(void)  {  	struct timespec ts; -	WARN_ON(preemptible());  	ktime_get_ts(&ts);  	monotonic_to_bootbased(&ts);  	return timespec_to_ns(&ts);  } +#ifdef CONFIG_X86_64 +static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); +#endif +  static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);  unsigned long max_tsc_khz; -static inline int kvm_tsc_changes_freq(void) +static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)  { -	int cpu = get_cpu(); -	int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && -		  cpufreq_quick_get(cpu) != 0; -	put_cpu(); -	return ret; +	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, +				   vcpu->arch.virtual_tsc_shift);  } -static inline u64 nsec_to_cycles(u64 nsec) +static u32 adjust_tsc_khz(u32 khz, s32 ppm)  { -	u64 ret; - -	WARN_ON(preemptible()); -	if (kvm_tsc_changes_freq()) -		printk_once(KERN_WARNING -		 "kvm: unreliable cycle conversion on adjustable rate TSC\n"); -	ret = nsec * __get_cpu_var(cpu_tsc_khz); -	do_div(ret, USEC_PER_SEC); -	return ret; +	u64 v = (u64)khz * (1000000 + ppm); +	do_div(v, 1000000); +	return v;  } -static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz) +static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)  { +	u32 thresh_lo, thresh_hi; +	int use_scaling = 0; + +	/* 
tsc_khz can be zero if TSC calibration fails */ +	if (this_tsc_khz == 0) +		return; +  	/* Compute a scale to convert nanoseconds in TSC cycles */  	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, -			   &kvm->arch.virtual_tsc_shift, -			   &kvm->arch.virtual_tsc_mult); -	kvm->arch.virtual_tsc_khz = this_tsc_khz; +			   &vcpu->arch.virtual_tsc_shift, +			   &vcpu->arch.virtual_tsc_mult); +	vcpu->arch.virtual_tsc_khz = this_tsc_khz; + +	/* +	 * Compute the variation in TSC rate which is acceptable +	 * within the range of tolerance and decide if the +	 * rate being applied is within that bounds of the hardware +	 * rate.  If so, no scaling or compensation need be done. +	 */ +	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); +	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); +	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { +		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); +		use_scaling = 1; +	} +	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);  }  static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)  { -	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec, -				      vcpu->kvm->arch.virtual_tsc_mult, -				      vcpu->kvm->arch.virtual_tsc_shift); -	tsc += vcpu->arch.last_tsc_write; +	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, +				      vcpu->arch.virtual_tsc_mult, +				      vcpu->arch.virtual_tsc_shift); +	tsc += vcpu->arch.this_tsc_write;  	return tsc;  } -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) +void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_X86_64 +	bool vcpus_matched; +	bool do_request = false; +	struct kvm_arch *ka = &vcpu->kvm->arch; +	struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + +	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == +			 atomic_read(&vcpu->kvm->online_vcpus)); + +	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) +		if (!ka->use_master_clock) +			do_request = 1; + +	if (!vcpus_matched && ka->use_master_clock) +			do_request = 1; + +	if (do_request) +		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); + +	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, +			    atomic_read(&vcpu->kvm->online_vcpus), +		            ka->use_master_clock, gtod->clock.vclock_mode); +#endif +} + +static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) +{ +	u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); +	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; +} + +void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)  {  	struct kvm *kvm = vcpu->kvm;  	u64 offset, ns, elapsed;  	unsigned long flags; -	s64 sdiff; +	s64 usdiff; +	bool matched; +	u64 data = msr->data; -	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); -	offset = data - native_read_tsc(); +	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); +	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);  	ns = get_kernel_ns();  	elapsed = ns - kvm->arch.last_tsc_nsec; -	sdiff = data - kvm->arch.last_tsc_write; -	if (sdiff < 0) -		sdiff = -sdiff; + +	if (vcpu->arch.virtual_tsc_khz) { +		int faulted = 0; + +		/* n.b - signed multiplication and division required */ +		usdiff = data - kvm->arch.last_tsc_write; +#ifdef CONFIG_X86_64 +		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; +#else +		/* do_div() only does unsigned */ +		asm("1: idivl %[divisor]\n" +		    "2: xor %%edx, %%edx\n" +		    "   movl $0, %[faulted]\n" +		    "3:\n" +		    ".section .fixup,\"ax\"\n" 
+		    "4: movl $1, %[faulted]\n" +		    "   jmp  3b\n" +		    ".previous\n" + +		_ASM_EXTABLE(1b, 4b) + +		: "=A"(usdiff), [faulted] "=r" (faulted) +		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); + +#endif +		do_div(elapsed, 1000); +		usdiff -= elapsed; +		if (usdiff < 0) +			usdiff = -usdiff; + +		/* idivl overflow => difference is larger than USEC_PER_SEC */ +		if (faulted) +			usdiff = USEC_PER_SEC; +	} else +		usdiff = USEC_PER_SEC; /* disable TSC match window below */  	/* -	 * Special case: close write to TSC within 5 seconds of -	 * another CPU is interpreted as an attempt to synchronize -	 * The 5 seconds is to accomodate host load / swapping as -	 * well as any reset of TSC during the boot process. -	 * -	 * In that case, for a reliable TSC, we can match TSC offsets, -	 * or make a best guest using elapsed value. -	 */ -	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) && -	    elapsed < 5ULL * NSEC_PER_SEC) { +	 * Special case: TSC write with a small delta (1 second) of virtual +	 * cycle time against real time is interpreted as an attempt to +	 * synchronize the CPU. +         * +	 * For a reliable TSC, we can match TSC offsets, and for an unstable +	 * TSC, we add elapsed time in this computation.  We could let the +	 * compensation code attempt to catch up if we fall behind, but +	 * it's better to try to match offsets from the beginning. +         */ +	if (usdiff < USEC_PER_SEC && +	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {  		if (!check_tsc_unstable()) { -			offset = kvm->arch.last_tsc_offset; +			offset = kvm->arch.cur_tsc_offset;  			pr_debug("kvm: matched tsc offset for %llu\n", data);  		} else { -			u64 delta = nsec_to_cycles(elapsed); -			offset += delta; +			u64 delta = nsec_to_cycles(vcpu, elapsed); +			data += delta; +			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);  			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);  		} -		ns = kvm->arch.last_tsc_nsec; +		matched = true; +	} else { +		/* +		 * We split periods of matched TSC writes into generations. +		 * For each generation, we track the original measured +		 * nanosecond time, offset, and write, so if TSCs are in +		 * sync, we can match exact offset, and if not, we can match +		 * exact software computation in compute_guest_tsc() +		 * +		 * These values are tracked in kvm->arch.cur_xxx variables. +		 */ +		kvm->arch.cur_tsc_generation++; +		kvm->arch.cur_tsc_nsec = ns; +		kvm->arch.cur_tsc_write = data; +		kvm->arch.cur_tsc_offset = offset; +		matched = false; +		pr_debug("kvm: new tsc generation %u, clock %llu\n", +			 kvm->arch.cur_tsc_generation, data);  	} + +	/* +	 * We also track th most recent recorded KHZ, write and time to +	 * allow the matching interval to be extended at each write. 
+	 */  	kvm->arch.last_tsc_nsec = ns;  	kvm->arch.last_tsc_write = data; -	kvm->arch.last_tsc_offset = offset; +	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; + +	vcpu->arch.last_guest_tsc = data; + +	/* Keep track of which generation this VCPU has synchronized to */ +	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; +	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; +	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; + +	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) +		update_ia32_tsc_adjust_msr(vcpu, offset);  	kvm_x86_ops->write_tsc_offset(vcpu, offset); -	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); +	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); -	/* Reset of TSC must disable overshoot protection below */ -	vcpu->arch.hv_clock.tsc_timestamp = 0; -	vcpu->arch.last_tsc_write = data; -	vcpu->arch.last_tsc_nsec = ns; +	spin_lock(&kvm->arch.pvclock_gtod_sync_lock); +	if (matched) +		kvm->arch.nr_vcpus_matched_tsc++; +	else +		kvm->arch.nr_vcpus_matched_tsc = 0; + +	kvm_track_tsc_matching(vcpu); +	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);  } +  EXPORT_SYMBOL_GPL(kvm_write_tsc); +#ifdef CONFIG_X86_64 + +static cycle_t read_tsc(void) +{ +	cycle_t ret; +	u64 last; + +	/* +	 * Empirically, a fence (of type that depends on the CPU) +	 * before rdtsc is enough to ensure that rdtsc is ordered +	 * with respect to loads.  The various CPU manuals are unclear +	 * as to whether rdtsc can be reordered with later loads, +	 * but no one has ever seen it happen. +	 */ +	rdtsc_barrier(); +	ret = (cycle_t)vget_cycles(); + +	last = pvclock_gtod_data.clock.cycle_last; + +	if (likely(ret >= last)) +		return ret; + +	/* +	 * GCC likes to generate cmov here, but this branch is extremely +	 * predictable (it's just a funciton of time and the likely is +	 * very likely) and there's a data dependence, so force GCC +	 * to generate a branch instead.  I don't barrier() because +	 * we don't actually need a barrier, and if this function +	 * ever gets inlined it will generate worse code. +	 */ +	asm volatile (""); +	return last; +} + +static inline u64 vgettsc(cycle_t *cycle_now) +{ +	long v; +	struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + +	*cycle_now = read_tsc(); + +	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask; +	return v * gtod->clock.mult; +} + +static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) +{ +	unsigned long seq; +	u64 ns; +	int mode; +	struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + +	ts->tv_nsec = 0; +	do { +		seq = read_seqcount_begin(>od->seq); +		mode = gtod->clock.vclock_mode; +		ts->tv_sec = gtod->monotonic_time_sec; +		ns = gtod->monotonic_time_snsec; +		ns += vgettsc(cycle_now); +		ns >>= gtod->clock.shift; +	} while (unlikely(read_seqcount_retry(>od->seq, seq))); +	timespec_add_ns(ts, ns); + +	return mode; +} + +/* returns true if host is using tsc clocksource */ +static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) +{ +	struct timespec ts; + +	/* checked again under seqlock below */ +	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) +		return false; + +	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC) +		return false; + +	monotonic_to_bootbased(&ts); +	*kernel_ns = timespec_to_ns(&ts); + +	return true; +} +#endif + +/* + * + * Assuming a stable TSC across physical CPUS, and a stable TSC + * across virtual CPUs, the following condition is possible. + * Each numbered line represents an event visible to both + * CPUs at the next numbered event. 
+ * + * "timespecX" represents host monotonic time. "tscX" represents + * RDTSC value. + * + * 		VCPU0 on CPU0		|	VCPU1 on CPU1 + * + * 1.  read timespec0,tsc0 + * 2.					| timespec1 = timespec0 + N + * 					| tsc1 = tsc0 + M + * 3. transition to guest		| transition to guest + * 4. ret0 = timespec0 + (rdtsc - tsc0) | + * 5.				        | ret1 = timespec1 + (rdtsc - tsc1) + * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) + * + * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: + * + * 	- ret0 < ret1 + *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) + *		... + *	- 0 < N - M => M < N + * + * That is, when timespec0 != timespec1, M < N. Unfortunately that is not + * always the case (the difference between two distinct xtime instances + * might be smaller then the difference between corresponding TSC reads, + * when updating guest vcpus pvclock areas). + * + * To avoid that problem, do not allow visibility of distinct + * system_timestamp/tsc_timestamp values simultaneously: use a master + * copy of host monotonic time values. Update that master copy + * in lockstep. + * + * Rely on synchronization of host TSCs and guest TSCs for monotonicity. + * + */ + +static void pvclock_update_vm_gtod_copy(struct kvm *kvm) +{ +#ifdef CONFIG_X86_64 +	struct kvm_arch *ka = &kvm->arch; +	int vclock_mode; +	bool host_tsc_clocksource, vcpus_matched; + +	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == +			atomic_read(&kvm->online_vcpus)); + +	/* +	 * If the host uses TSC clock, then passthrough TSC as stable +	 * to the guest. +	 */ +	host_tsc_clocksource = kvm_get_time_and_clockread( +					&ka->master_kernel_ns, +					&ka->master_cycle_now); + +	ka->use_master_clock = host_tsc_clocksource && vcpus_matched +				&& !backwards_tsc_observed; + +	if (ka->use_master_clock) +		atomic_set(&kvm_guest_has_master_clock, 1); + +	vclock_mode = pvclock_gtod_data.clock.vclock_mode; +	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, +					vcpus_matched); +#endif +} + +static void kvm_gen_update_masterclock(struct kvm *kvm) +{ +#ifdef CONFIG_X86_64 +	int i; +	struct kvm_vcpu *vcpu; +	struct kvm_arch *ka = &kvm->arch; + +	spin_lock(&ka->pvclock_gtod_sync_lock); +	kvm_make_mclock_inprogress_request(kvm); +	/* no guest entries from this point */ +	pvclock_update_vm_gtod_copy(kvm); + +	kvm_for_each_vcpu(i, vcpu, kvm) +		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); + +	/* guest entries allowed */ +	kvm_for_each_vcpu(i, vcpu, kvm) +		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); + +	spin_unlock(&ka->pvclock_gtod_sync_lock); +#endif +} +  static int kvm_guest_time_update(struct kvm_vcpu *v)  { -	unsigned long flags; +	unsigned long flags, this_tsc_khz;  	struct kvm_vcpu_arch *vcpu = &v->arch; -	void *shared_kaddr; -	unsigned long this_tsc_khz; -	s64 kernel_ns, max_kernel_ns; -	u64 tsc_timestamp; +	struct kvm_arch *ka = &v->kvm->arch; +	s64 kernel_ns; +	u64 tsc_timestamp, host_tsc; +	struct pvclock_vcpu_time_info guest_hv_clock; +	u8 pvclock_flags; +	bool use_master_clock; + +	kernel_ns = 0; +	host_tsc = 0; + +	/* +	 * If the host uses TSC clock, then passthrough TSC as stable +	 * to the guest. 
+	 */ +	spin_lock(&ka->pvclock_gtod_sync_lock); +	use_master_clock = ka->use_master_clock; +	if (use_master_clock) { +		host_tsc = ka->master_cycle_now; +		kernel_ns = ka->master_kernel_ns; +	} +	spin_unlock(&ka->pvclock_gtod_sync_lock);  	/* Keep irq disabled to prevent changes to the clock */  	local_irq_save(flags); -	kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); -	kernel_ns = get_kernel_ns();  	this_tsc_khz = __get_cpu_var(cpu_tsc_khz); -  	if (unlikely(this_tsc_khz == 0)) {  		local_irq_restore(flags);  		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);  		return 1;  	} +	if (!use_master_clock) { +		host_tsc = native_read_tsc(); +		kernel_ns = get_kernel_ns(); +	} + +	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);  	/*  	 * We may have to catch up the TSC to match elapsed wall clock @@ -1087,47 +1563,16 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)  	if (vcpu->tsc_catchup) {  		u64 tsc = compute_guest_tsc(v, kernel_ns);  		if (tsc > tsc_timestamp) { -			kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp); +			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);  			tsc_timestamp = tsc;  		}  	}  	local_irq_restore(flags); -	if (!vcpu->time_page) +	if (!vcpu->pv_time_enabled)  		return 0; -	/* -	 * Time as measured by the TSC may go backwards when resetting the base -	 * tsc_timestamp.  The reason for this is that the TSC resolution is -	 * higher than the resolution of the other clock scales.  Thus, many -	 * possible measurments of the TSC correspond to one measurement of any -	 * other clock, and so a spread of values is possible.  This is not a -	 * problem for the computation of the nanosecond clock; with TSC rates -	 * around 1GHZ, there can only be a few cycles which correspond to one -	 * nanosecond value, and any path through this code will inevitably -	 * take longer than that.  However, with the kernel_ns value itself, -	 * the precision may be much lower, down to HZ granularity.  If the -	 * first sampling of TSC against kernel_ns ends in the low part of the -	 * range, and the second in the high end of the range, we can get: -	 * -	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new -	 * -	 * As the sampling errors potentially range in the thousands of cycles, -	 * it is possible such a time value has already been observed by the -	 * guest.  To protect against this, we must compute the system time as -	 * observed by the guest and ensure the new system time is greater. 
-	 */ -	max_kernel_ns = 0; -	if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) { -		max_kernel_ns = vcpu->last_guest_tsc - -				vcpu->hv_clock.tsc_timestamp; -		max_kernel_ns = pvclock_scale_delta(max_kernel_ns, -				    vcpu->hv_clock.tsc_to_system_mul, -				    vcpu->hv_clock.tsc_shift); -		max_kernel_ns += vcpu->last_kernel_ns; -	} -  	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {  		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,  				   &vcpu->hv_clock.tsc_shift, @@ -1135,15 +1580,10 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)  		vcpu->hw_tsc_khz = this_tsc_khz;  	} -	if (max_kernel_ns > kernel_ns) -		kernel_ns = max_kernel_ns; -  	/* With all the info we got, fill in the values */  	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;  	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; -	vcpu->last_kernel_ns = kernel_ns;  	vcpu->last_guest_tsc = tsc_timestamp; -	vcpu->hv_clock.flags = 0;  	/*  	 * The interface expects us to write an even number signaling that the @@ -1152,17 +1592,84 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)  	 */  	vcpu->hv_clock.version += 2; -	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); +	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, +		&guest_hv_clock, sizeof(guest_hv_clock)))) +		return 0; + +	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ +	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); + +	if (vcpu->pvclock_set_guest_stopped_request) { +		pvclock_flags |= PVCLOCK_GUEST_STOPPED; +		vcpu->pvclock_set_guest_stopped_request = false; +	} -	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, -	       sizeof(vcpu->hv_clock)); +	/* If the host uses TSC clocksource, then it is stable */ +	if (use_master_clock) +		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; -	kunmap_atomic(shared_kaddr, KM_USER0); +	vcpu->hv_clock.flags = pvclock_flags; -	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); +	kvm_write_guest_cached(v->kvm, &vcpu->pv_time, +				&vcpu->hv_clock, +				sizeof(vcpu->hv_clock));  	return 0;  } +/* + * kvmclock updates which are isolated to a given vcpu, such as + * vcpu->cpu migration, should not allow system_timestamp from + * the rest of the vcpus to remain static. Otherwise ntp frequency + * correction applies to one vcpu's system_timestamp but not + * the others. + * + * So in those cases, request a kvmclock update for all vcpus. + * We need to rate-limit these requests though, as they can + * considerably slow guests that have a large number of vcpus. + * The time for a remote vcpu to update its kvmclock is bound + * by the delay we use to rate-limit the updates. 
+ */ + +#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) + +static void kvmclock_update_fn(struct work_struct *work) +{ +	int i; +	struct delayed_work *dwork = to_delayed_work(work); +	struct kvm_arch *ka = container_of(dwork, struct kvm_arch, +					   kvmclock_update_work); +	struct kvm *kvm = container_of(ka, struct kvm, arch); +	struct kvm_vcpu *vcpu; + +	kvm_for_each_vcpu(i, vcpu, kvm) { +		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); +		kvm_vcpu_kick(vcpu); +	} +} + +static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) +{ +	struct kvm *kvm = v->kvm; + +	set_bit(KVM_REQ_CLOCK_UPDATE, &v->requests); +	schedule_delayed_work(&kvm->arch.kvmclock_update_work, +					KVMCLOCK_UPDATE_DELAY); +} + +#define KVMCLOCK_SYNC_PERIOD (300 * HZ) + +static void kvmclock_sync_fn(struct work_struct *work) +{ +	struct delayed_work *dwork = to_delayed_work(work); +	struct kvm_arch *ka = container_of(dwork, struct kvm_arch, +					   kvmclock_sync_work); +	struct kvm *kvm = container_of(ka, struct kvm, arch); + +	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); +	schedule_delayed_work(&kvm->arch.kvmclock_sync_work, +					KVMCLOCK_SYNC_PERIOD); +} +  static bool msr_mtrr_valid(unsigned msr)  {  	switch (msr) { @@ -1314,12 +1821,11 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)  	if (page_num >= blob_size)  		goto out;  	r = -ENOMEM; -	page = kzalloc(PAGE_SIZE, GFP_KERNEL); -	if (!page) +	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); +	if (IS_ERR(page)) { +		r = PTR_ERR(page);  		goto out; -	r = -EFAULT; -	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE)) -		goto out_free; +	}  	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))  		goto out_free;  	r = 0; @@ -1340,6 +1846,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)  	switch (msr) {  	case HV_X64_MSR_GUEST_OS_ID:  	case HV_X64_MSR_HYPERCALL: +	case HV_X64_MSR_REFERENCE_TSC: +	case HV_X64_MSR_TIME_REF_COUNT:  		r = true;  		break;  	} @@ -1376,14 +1884,29 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)  			return 1;  		kvm_x86_ops->patch_hypercall(vcpu, instructions);  		((unsigned char *)instructions)[3] = 0xc3; /* ret */ -		if (copy_to_user((void __user *)addr, instructions, 4)) +		if (__copy_to_user((void __user *)addr, instructions, 4))  			return 1;  		kvm->arch.hv_hypercall = data; +		mark_page_dirty(kvm, gfn); +		break; +	} +	case HV_X64_MSR_REFERENCE_TSC: { +		u64 gfn; +		HV_REFERENCE_TSC_PAGE tsc_ref; +		memset(&tsc_ref, 0, sizeof(tsc_ref)); +		kvm->arch.hv_tsc_page = data; +		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) +			break; +		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; +		if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, +			&tsc_ref, sizeof(tsc_ref))) +			return 1; +		mark_page_dirty(kvm, gfn);  		break;  	}  	default: -		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " -			  "data 0x%llx\n", msr, data); +		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " +			    "data 0x%llx\n", msr, data);  		return 1;  	}  	return 0; @@ -1393,19 +1916,25 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)  {  	switch (msr) {  	case HV_X64_MSR_APIC_ASSIST_PAGE: { +		u64 gfn;  		unsigned long addr;  		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {  			vcpu->arch.hv_vapic = data; +			if (kvm_lapic_enable_pv_eoi(vcpu, 0)) +				return 1;  			break;  		} -		addr = gfn_to_hva(vcpu->kvm, data >> -				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); +		gfn = data >> 
HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; +		addr = gfn_to_hva(vcpu->kvm, gfn);  		if (kvm_is_error_hva(addr))  			return 1; -		if (clear_user((void __user *)addr, PAGE_SIZE)) +		if (__clear_user((void __user *)addr, PAGE_SIZE))  			return 1;  		vcpu->arch.hv_vapic = data; +		mark_page_dirty(vcpu->kvm, gfn); +		if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) +			return 1;  		break;  	}  	case HV_X64_MSR_EOI: @@ -1415,37 +1944,107 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)  	case HV_X64_MSR_TPR:  		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);  	default: -		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " -			  "data 0x%llx\n", msr, data); +		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " +			    "data 0x%llx\n", msr, data);  		return 1;  	}  	return 0;  } -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) +static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)  { +	gpa_t gpa = data & ~0x3f; + +	/* Bits 2:5 are reserved, Should be zero */ +	if (data & 0x3c) +		return 1; + +	vcpu->arch.apf.msr_val = data; + +	if (!(data & KVM_ASYNC_PF_ENABLED)) { +		kvm_clear_async_pf_completion_queue(vcpu); +		kvm_async_pf_hash_reset(vcpu); +		return 0; +	} + +	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, +					sizeof(u32))) +		return 1; + +	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); +	kvm_async_pf_wakeup_all(vcpu); +	return 0; +} + +static void kvmclock_reset(struct kvm_vcpu *vcpu) +{ +	vcpu->arch.pv_time_enabled = false; +} + +static void accumulate_steal_time(struct kvm_vcpu *vcpu) +{ +	u64 delta; + +	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) +		return; + +	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; +	vcpu->arch.st.last_steal = current->sched_info.run_delay; +	vcpu->arch.st.accum_steal = delta; +} + +static void record_steal_time(struct kvm_vcpu *vcpu) +{ +	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) +		return; + +	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, +		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) +		return; + +	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; +	vcpu->arch.st.steal.version += 2; +	vcpu->arch.st.accum_steal = 0; + +	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, +		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); +} + +int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ +	bool pr = false; +	u32 msr = msr_info->index; +	u64 data = msr_info->data; +  	switch (msr) { +	case MSR_AMD64_NB_CFG: +	case MSR_IA32_UCODE_REV: +	case MSR_IA32_UCODE_WRITE: +	case MSR_VM_HSAVE_PA: +	case MSR_AMD64_PATCH_LOADER: +	case MSR_AMD64_BU_CFG2: +		break; +  	case MSR_EFER:  		return set_efer(vcpu, data);  	case MSR_K7_HWCR:  		data &= ~(u64)0x40;	/* ignore flush filter disable */  		data &= ~(u64)0x100;	/* ignore ignne emulation enable */ +		data &= ~(u64)0x8;	/* ignore TLB cache disable */  		if (data != 0) { -			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", -				data); +			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", +				    data);  			return 1;  		}  		break;  	case MSR_FAM10H_MMIO_CONF_BASE:  		if (data != 0) { -			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " -				"0x%llx\n", data); +			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " +				    "0x%llx\n", data);  			return 1;  		}  		break; -	case MSR_AMD64_NB_CFG: -		break;  	case MSR_IA32_DEBUGCTLMSR:  		if (!data) {  			/* We support the non-activated 
case already */ @@ -1455,21 +2054,27 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)  			   thus reserved and should throw a #GP */  			return 1;  		} -		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", -			__func__, data); -		break; -	case MSR_IA32_UCODE_REV: -	case MSR_IA32_UCODE_WRITE: -	case MSR_VM_HSAVE_PA: -	case MSR_AMD64_PATCH_LOADER: +		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", +			    __func__, data);  		break;  	case 0x200 ... 0x2ff:  		return set_msr_mtrr(vcpu, msr, data);  	case MSR_IA32_APICBASE: -		kvm_set_apic_base(vcpu, data); -		break; +		return kvm_set_apic_base(vcpu, msr_info);  	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:  		return kvm_x2apic_msr_write(vcpu, msr, data); +	case MSR_IA32_TSCDEADLINE: +		kvm_set_lapic_tscdeadline_msr(vcpu, data); +		break; +	case MSR_IA32_TSC_ADJUST: +		if (guest_cpuid_has_tsc_adjust(vcpu)) { +			if (!msr_info->host_initiated) { +				u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; +				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); +			} +			vcpu->arch.ia32_tsc_adjust_msr = data; +		} +		break;  	case MSR_IA32_MISC_ENABLE:  		vcpu->arch.ia32_misc_enable_msr = data;  		break; @@ -1480,30 +2085,63 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)  		break;  	case MSR_KVM_SYSTEM_TIME_NEW:  	case MSR_KVM_SYSTEM_TIME: { -		if (vcpu->arch.time_page) { -			kvm_release_page_dirty(vcpu->arch.time_page); -			vcpu->arch.time_page = NULL; -		} +		u64 gpa_offset; +		kvmclock_reset(vcpu);  		vcpu->arch.time = data; -		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); +		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);  		/* we verify if the enable bit is set... */  		if (!(data & 1))  			break; -		/* ...but clean it before doing the actual write */ -		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); +		gpa_offset = data & ~(PAGE_MASK | 1); -		vcpu->arch.time_page = -				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); +		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, +		     &vcpu->arch.pv_time, data & ~1ULL, +		     sizeof(struct pvclock_vcpu_time_info))) +			vcpu->arch.pv_time_enabled = false; +		else +			vcpu->arch.pv_time_enabled = true; -		if (is_error_page(vcpu->arch.time_page)) { -			kvm_release_page_clean(vcpu->arch.time_page); -			vcpu->arch.time_page = NULL; -		}  		break;  	} +	case MSR_KVM_ASYNC_PF_EN: +		if (kvm_pv_enable_async_pf(vcpu, data)) +			return 1; +		break; +	case MSR_KVM_STEAL_TIME: + +		if (unlikely(!sched_info_on())) +			return 1; + +		if (data & KVM_STEAL_RESERVED_MASK) +			return 1; + +		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, +						data & KVM_STEAL_VALID_BITS, +						sizeof(struct kvm_steal_time))) +			return 1; + +		vcpu->arch.st.msr_val = data; + +		if (!(data & KVM_MSR_ENABLED)) +			break; + +		vcpu->arch.st.last_steal = current->sched_info.run_delay; + +		preempt_disable(); +		accumulate_steal_time(vcpu); +		preempt_enable(); + +		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + +		break; +	case MSR_KVM_PV_EOI_EN: +		if (kvm_lapic_enable_pv_eoi(vcpu, data)) +			return 1; +		break; +  	case MSR_IA32_MCG_CTL:  	case MSR_IA32_MCG_STATUS:  	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: @@ -1516,34 +2154,42 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)  	 * which we perfectly emulate ;-). Any other value should be at least  	 * reported, some guests depend on them.  	 
*/ -	case MSR_P6_EVNTSEL0: -	case MSR_P6_EVNTSEL1:  	case MSR_K7_EVNTSEL0:  	case MSR_K7_EVNTSEL1:  	case MSR_K7_EVNTSEL2:  	case MSR_K7_EVNTSEL3:  		if (data != 0) -			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " -				"0x%x data 0x%llx\n", msr, data); +			vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " +				    "0x%x data 0x%llx\n", msr, data);  		break;  	/* at least RHEL 4 unconditionally writes to the perfctr registers,  	 * so we ignore writes to make it happy.  	 */ -	case MSR_P6_PERFCTR0: -	case MSR_P6_PERFCTR1:  	case MSR_K7_PERFCTR0:  	case MSR_K7_PERFCTR1:  	case MSR_K7_PERFCTR2:  	case MSR_K7_PERFCTR3: -		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " -			"0x%x data 0x%llx\n", msr, data); +		vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " +			    "0x%x data 0x%llx\n", msr, data); +		break; +	case MSR_P6_PERFCTR0: +	case MSR_P6_PERFCTR1: +		pr = true; +	case MSR_P6_EVNTSEL0: +	case MSR_P6_EVNTSEL1: +		if (kvm_pmu_msr(vcpu, msr)) +			return kvm_pmu_set_msr(vcpu, msr_info); + +		if (pr || data != 0) +			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " +				    "0x%x data 0x%llx\n", msr, data);  		break;  	case MSR_K7_CLK_CTL:  		/*  		 * Ignore all writes to this no longer documented MSR.  		 * Writes are only relevant for old K7 processors,  		 * all pre-dating SVM, but a recommended workaround from -		 * AMD for these chips. It is possible to speicify the +		 * AMD for these chips. It is possible to specify the  		 * affected processor models on the command line, hence  		 * the need to ignore the workaround.  		 */ @@ -1558,16 +2204,34 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)  		} else  			return set_msr_hyperv(vcpu, msr, data);  		break; +	case MSR_IA32_BBL_CR_CTL3: +		/* Drop writes to this legacy MSR -- see rdmsr +		 * counterpart for further detail. 
+		 */ +		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); +		break; +	case MSR_AMD64_OSVW_ID_LENGTH: +		if (!guest_cpuid_has_osvw(vcpu)) +			return 1; +		vcpu->arch.osvw.length = data; +		break; +	case MSR_AMD64_OSVW_STATUS: +		if (!guest_cpuid_has_osvw(vcpu)) +			return 1; +		vcpu->arch.osvw.status = data; +		break;  	default:  		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))  			return xen_hvm_config(vcpu, data); +		if (kvm_pmu_msr(vcpu, msr)) +			return kvm_pmu_set_msr(vcpu, msr_info);  		if (!ignore_msrs) { -			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", -				msr, data); +			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", +				    msr, data);  			return 1;  		} else { -			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", -				msr, data); +			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", +				    msr, data);  			break;  		}  	} @@ -1669,8 +2333,16 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	case HV_X64_MSR_HYPERCALL:  		data = kvm->arch.hv_hypercall;  		break; +	case HV_X64_MSR_TIME_REF_COUNT: { +		data = +		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); +		break; +	} +	case HV_X64_MSR_REFERENCE_TSC: +		data = kvm->arch.hv_tsc_page; +		break;  	default: -		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); +		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);  		return 1;  	} @@ -1686,9 +2358,12 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	case HV_X64_MSR_VP_INDEX: {  		int r;  		struct kvm_vcpu *v; -		kvm_for_each_vcpu(r, v, vcpu->kvm) -			if (v == vcpu) +		kvm_for_each_vcpu(r, v, vcpu->kvm) { +			if (v == vcpu) {  				data = r; +				break; +			} +		}  		break;  	}  	case HV_X64_MSR_EOI: @@ -1697,8 +2372,11 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);  	case HV_X64_MSR_TPR:  		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); +	case HV_X64_MSR_APIC_ASSIST_PAGE: +		data = vcpu->arch.hv_vapic; +		break;  	default: -		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); +		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);  		return 1;  	}  	*pdata = data; @@ -1711,7 +2389,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	switch (msr) {  	case MSR_IA32_PLATFORM_ID: -	case MSR_IA32_UCODE_REV:  	case MSR_IA32_EBL_CR_POWERON:  	case MSR_IA32_DEBUGCTLMSR:  	case MSR_IA32_LASTBRANCHFROMIP: @@ -1721,17 +2398,25 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	case MSR_K8_SYSCFG:  	case MSR_K7_HWCR:  	case MSR_VM_HSAVE_PA: -	case MSR_P6_PERFCTR0: -	case MSR_P6_PERFCTR1: -	case MSR_P6_EVNTSEL0: -	case MSR_P6_EVNTSEL1:  	case MSR_K7_EVNTSEL0:  	case MSR_K7_PERFCTR0:  	case MSR_K8_INT_PENDING_MSG:  	case MSR_AMD64_NB_CFG:  	case MSR_FAM10H_MMIO_CONF_BASE: +	case MSR_AMD64_BU_CFG2: +		data = 0; +		break; +	case MSR_P6_PERFCTR0: +	case MSR_P6_PERFCTR1: +	case MSR_P6_EVNTSEL0: +	case MSR_P6_EVNTSEL1: +		if (kvm_pmu_msr(vcpu, msr)) +			return kvm_pmu_get_msr(vcpu, msr, pdata);  		data = 0;  		break; +	case MSR_IA32_UCODE_REV: +		data = 0x100000000ULL; +		break;  	case MSR_MTRRcap:  		data = 0x500 | KVM_NR_VAR_MTRR;  		break; @@ -1760,6 +2445,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0x3ff:  		return kvm_x2apic_msr_read(vcpu, msr, pdata);  		break; +	case MSR_IA32_TSCDEADLINE: +		data = kvm_get_lapic_tscdeadline_msr(vcpu); +		break; +	case MSR_IA32_TSC_ADJUST: +		data = (u64)vcpu->arch.ia32_tsc_adjust_msr; +		break;  	case MSR_IA32_MISC_ENABLE:  		data = vcpu->arch.ia32_misc_enable_msr;  		break; @@ -1780,6 +2471,15 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  	case MSR_KVM_SYSTEM_TIME_NEW:  		data = vcpu->arch.time;  		break; +	case MSR_KVM_ASYNC_PF_EN: +		data = vcpu->arch.apf.msr_val; +		break; +	case MSR_KVM_STEAL_TIME: +		data = vcpu->arch.st.msr_val; +		break; +	case MSR_KVM_PV_EOI_EN: +		data = vcpu->arch.pv_eoi.msr_val; +		break;  	case MSR_IA32_P5_MC_ADDR:  	case MSR_IA32_P5_MC_TYPE:  	case MSR_IA32_MCG_CAP: @@ -1809,12 +2509,37 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)  		} else  			return get_msr_hyperv(vcpu, msr, pdata);  		break; +	case MSR_IA32_BBL_CR_CTL3: +		/* This legacy MSR exists but isn't fully documented in current +		 * silicon.  It is however accessed by winxp in very narrow +		 * scenarios where it sets bit #19, itself documented as +		 * a "reserved" bit.  Best effort attempt to source coherent +		 * read data here should the balance of the register be +		 * interpreted by the guest: +		 * +		 * L2 cache control register 3: 64GB range, 256KB size, +		 * enabled, latency 0x1, configured +		 */ +		data = 0xbe702111; +		break; +	case MSR_AMD64_OSVW_ID_LENGTH: +		if (!guest_cpuid_has_osvw(vcpu)) +			return 1; +		data = vcpu->arch.osvw.length; +		break; +	case MSR_AMD64_OSVW_STATUS: +		if (!guest_cpuid_has_osvw(vcpu)) +			return 1; +		data = vcpu->arch.osvw.status; +		break;  	default: +		if (kvm_pmu_msr(vcpu, msr)) +			return kvm_pmu_get_msr(vcpu, msr, pdata);  		if (!ignore_msrs) { -			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); +			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);  			return 1;  		} else { -			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); +			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);  			data = 0;  		}  		break; @@ -1868,15 +2593,12 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,  	if (msrs.nmsrs >= MAX_IO_MSRS)  		goto out; -	r = -ENOMEM;  	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; -	entries = kmalloc(size, GFP_KERNEL); -	if (!entries) +	entries = memdup_user(user_msrs->entries, size); +	if (IS_ERR(entries)) { +		r = PTR_ERR(entries);  		goto out; - -	r = -EFAULT; -	if (copy_from_user(entries, user_msrs->entries, size)) -		goto out_free; +	}  	r = n = __msr_io(vcpu, &msrs, entries, do_msr);  	if (r < 0) @@ -1904,16 +2626,18 @@ int kvm_dev_ioctl_check_extension(long ext)  	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:  	case KVM_CAP_SET_TSS_ADDR:  	case KVM_CAP_EXT_CPUID: +	case KVM_CAP_EXT_EMUL_CPUID:  	case KVM_CAP_CLOCKSOURCE:  	case KVM_CAP_PIT:  	case KVM_CAP_NOP_IO_DELAY:  	case KVM_CAP_MP_STATE:  	case KVM_CAP_SYNC_MMU: +	case KVM_CAP_USER_NMI:  	case KVM_CAP_REINJECT_CONTROL:  	case KVM_CAP_IRQ_INJECT_STATUS: -	case KVM_CAP_ASSIGN_DEV_IRQ:  	case KVM_CAP_IRQFD:  	case KVM_CAP_IOEVENTFD: +	case KVM_CAP_IOEVENTFD_NO_LENGTH:  	case KVM_CAP_PIT2:  	case KVM_CAP_PIT_STATE2:  	case KVM_CAP_SET_IDENTITY_MAP_ADDR: @@ -1927,6 +2651,16 @@ int kvm_dev_ioctl_check_extension(long ext)  	case KVM_CAP_DEBUGREGS:  	case KVM_CAP_X86_ROBUST_SINGLESTEP:  	case KVM_CAP_XSAVE: +	case KVM_CAP_ASYNC_PF: +	case KVM_CAP_GET_TSC_KHZ: +	case KVM_CAP_KVMCLOCK_CTRL: +	case KVM_CAP_READONLY_MEM: +	case KVM_CAP_HYPERV_TIME: +	case 
KVM_CAP_IOAPIC_POLARITY_IGNORED: +#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT +	case KVM_CAP_ASSIGN_DEV_IRQ: +	case KVM_CAP_PCI_2_3: +#endif  		r = 1;  		break;  	case KVM_CAP_COALESCED_MMIO: @@ -1936,23 +2670,34 @@ int kvm_dev_ioctl_check_extension(long ext)  		r = !kvm_x86_ops->cpu_has_accelerated_tpr();  		break;  	case KVM_CAP_NR_VCPUS: +		r = KVM_SOFT_MAX_VCPUS; +		break; +	case KVM_CAP_MAX_VCPUS:  		r = KVM_MAX_VCPUS;  		break;  	case KVM_CAP_NR_MEMSLOTS: -		r = KVM_MEMORY_SLOTS; +		r = KVM_USER_MEM_SLOTS;  		break;  	case KVM_CAP_PV_MMU:	/* obsolete */  		r = 0;  		break; +#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT  	case KVM_CAP_IOMMU: -		r = iommu_found(); +		r = iommu_present(&pci_bus_type);  		break; +#endif  	case KVM_CAP_MCE:  		r = KVM_MAX_MCE_BANKS;  		break;  	case KVM_CAP_XCRS:  		r = cpu_has_xsave;  		break; +	case KVM_CAP_TSC_CONTROL: +		r = kvm_has_tsc_control; +		break; +	case KVM_CAP_TSC_DEADLINE_TIMER: +		r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); +		break;  	default:  		r = 0;  		break; @@ -1994,15 +2739,17 @@ long kvm_arch_dev_ioctl(struct file *filp,  		r = 0;  		break;  	} -	case KVM_GET_SUPPORTED_CPUID: { +	case KVM_GET_SUPPORTED_CPUID: +	case KVM_GET_EMULATED_CPUID: {  		struct kvm_cpuid2 __user *cpuid_arg = argp;  		struct kvm_cpuid2 cpuid;  		r = -EFAULT;  		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))  			goto out; -		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, -						      cpuid_arg->entries); + +		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, +					    ioctl);  		if (r)  			goto out; @@ -2036,8 +2783,7 @@ static void wbinvd_ipi(void *garbage)  static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)  { -	return vcpu->kvm->arch.iommu_domain && -		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); +	return kvm_arch_has_noncoherent_dma(vcpu->kvm);  }  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -2052,21 +2798,38 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)  	}  	kvm_x86_ops->vcpu_load(vcpu, cpu); + +	/* Apply any externally detected TSC adjustments (due to suspend) */ +	if (unlikely(vcpu->arch.tsc_offset_adjustment)) { +		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); +		vcpu->arch.tsc_offset_adjustment = 0; +		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); +	} +  	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { -		/* Make sure TSC doesn't go backwards */  		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 :  				native_read_tsc() - vcpu->arch.last_host_tsc;  		if (tsc_delta < 0)  			mark_tsc_unstable("KVM discovered backwards TSC");  		if (check_tsc_unstable()) { -			kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta); +			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, +						vcpu->arch.last_guest_tsc); +			kvm_x86_ops->write_tsc_offset(vcpu, offset);  			vcpu->arch.tsc_catchup = 1; -			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);  		} +		/* +		 * On a host with synchronized TSC, there is no need to update +		 * kvmclock on vcpu->cpu migration +		 */ +		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) +			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);  		if (vcpu->cpu != cpu)  			kvm_migrate_timers(vcpu);  		vcpu->cpu = cpu;  	} + +	accumulate_steal_time(vcpu); +	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);  }  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -2076,363 +2839,10 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)  	vcpu->arch.last_host_tsc = native_read_tsc();  } -static int is_efer_nx(void) -{ -	unsigned long long efer = 0; - -	rdmsrl_safe(MSR_EFER, &efer); -	return efer & EFER_NX; -} - -static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) -{ -	int i; -	struct kvm_cpuid_entry2 *e, *entry; - -	entry = NULL; -	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { -		e = &vcpu->arch.cpuid_entries[i]; -		if (e->function == 0x80000001) { -			entry = e; -			break; -		} -	} -	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) { -		entry->edx &= ~(1 << 20); -		printk(KERN_INFO "kvm: guest NX capability removed\n"); -	} -} - -/* when an old userspace process fills a new kernel module */ -static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, -				    struct kvm_cpuid *cpuid, -				    struct kvm_cpuid_entry __user *entries) -{ -	int r, i; -	struct kvm_cpuid_entry *cpuid_entries; - -	r = -E2BIG; -	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) -		goto out; -	r = -ENOMEM; -	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); -	if (!cpuid_entries) -		goto out; -	r = -EFAULT; -	if (copy_from_user(cpuid_entries, entries, -			   cpuid->nent * sizeof(struct kvm_cpuid_entry))) -		goto out_free; -	for (i = 0; i < cpuid->nent; i++) { -		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; -		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; -		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; -		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; -		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx; -		vcpu->arch.cpuid_entries[i].index = 0; -		vcpu->arch.cpuid_entries[i].flags = 0; -		vcpu->arch.cpuid_entries[i].padding[0] = 0; -		vcpu->arch.cpuid_entries[i].padding[1] = 0; -		vcpu->arch.cpuid_entries[i].padding[2] = 0; -	} -	vcpu->arch.cpuid_nent = cpuid->nent; -	cpuid_fix_nx_cap(vcpu); -	r = 0; -	kvm_apic_set_version(vcpu); -	kvm_x86_ops->cpuid_update(vcpu); -	update_cpuid(vcpu); - -out_free: -	vfree(cpuid_entries); -out: -	return r; -} - -static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, -				     struct kvm_cpuid2 *cpuid, -				     struct kvm_cpuid_entry2 __user *entries) -{ -	int r; - -	r = -E2BIG; -	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) -		goto out; -	r = -EFAULT; -	if (copy_from_user(&vcpu->arch.cpuid_entries, entries, -			   cpuid->nent * sizeof(struct kvm_cpuid_entry2))) -		goto out; -	vcpu->arch.cpuid_nent = cpuid->nent; -	kvm_apic_set_version(vcpu); -	kvm_x86_ops->cpuid_update(vcpu); -	update_cpuid(vcpu); -	return 0; - -out: -	return r; -} - -static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, -				 
    struct kvm_cpuid2 *cpuid, -				     struct kvm_cpuid_entry2 __user *entries) -{ -	int r; - -	r = -E2BIG; -	if (cpuid->nent < vcpu->arch.cpuid_nent) -		goto out; -	r = -EFAULT; -	if (copy_to_user(entries, &vcpu->arch.cpuid_entries, -			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) -		goto out; -	return 0; - -out: -	cpuid->nent = vcpu->arch.cpuid_nent; -	return r; -} - -static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, -			   u32 index) -{ -	entry->function = function; -	entry->index = index; -	cpuid_count(entry->function, entry->index, -		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); -	entry->flags = 0; -} - -#define F(x) bit(X86_FEATURE_##x) - -static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, -			 u32 index, int *nent, int maxnent) -{ -	unsigned f_nx = is_efer_nx() ? F(NX) : 0; -#ifdef CONFIG_X86_64 -	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL) -				? F(GBPAGES) : 0; -	unsigned f_lm = F(LM); -#else -	unsigned f_gbpages = 0; -	unsigned f_lm = 0; -#endif -	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; - -	/* cpuid 1.edx */ -	const u32 kvm_supported_word0_x86_features = -		F(FPU) | F(VME) | F(DE) | F(PSE) | -		F(TSC) | F(MSR) | F(PAE) | F(MCE) | -		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | -		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | -		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) | -		0 /* Reserved, DS, ACPI */ | F(MMX) | -		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | -		0 /* HTT, TM, Reserved, PBE */; -	/* cpuid 0x80000001.edx */ -	const u32 kvm_supported_word1_x86_features = -		F(FPU) | F(VME) | F(DE) | F(PSE) | -		F(TSC) | F(MSR) | F(PAE) | F(MCE) | -		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | -		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | -		F(PAT) | F(PSE36) | 0 /* Reserved */ | -		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | -		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp | -		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); -	/* cpuid 1.ecx */ -	const u32 kvm_supported_word4_x86_features = -		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | -		0 /* DS-CPL, VMX, SMX, EST */ | -		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | -		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | -		0 /* Reserved, DCA */ | F(XMM4_1) | -		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | -		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | -		F(F16C); -	/* cpuid 0x80000001.ecx */ -	const u32 kvm_supported_word6_x86_features = -		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | -		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | -		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | -		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); - -	/* all calls to cpuid_count() should be made on the same cpu */ -	get_cpu(); -	do_cpuid_1_ent(entry, function, index); -	++*nent; - -	switch (function) { -	case 0: -		entry->eax = min(entry->eax, (u32)0xd); -		break; -	case 1: -		entry->edx &= kvm_supported_word0_x86_features; -		entry->ecx &= kvm_supported_word4_x86_features; -		/* we support x2apic emulation even if host does not support -		 * it since we emulate x2apic in software */ -		entry->ecx |= F(X2APIC); -		break; -	/* function 2 entries are STATEFUL. That is, repeated cpuid commands -	 * may return different values. 
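/*
 * Illustrative sketch, not part of the patch: the CPUID ioctl plumbing being
 * removed from this file shows the userspace contract it implements.  The
 * caller sizes struct kvm_cpuid2 (it has a flexible entries[] array) and the
 * kernel answers -E2BIG when the buffer is too small, so a typical consumer
 * grows the buffer and retries, roughly as below.  kvm_fd is the /dev/kvm
 * file descriptor; error handling is trimmed for brevity.
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_supported_cpuid(int kvm_fd)
{
	int nent = 64;
	struct kvm_cpuid2 *cpuid;

	for (;;) {
		cpuid = calloc(1, sizeof(*cpuid) +
				  nent * sizeof(struct kvm_cpuid_entry2));
		if (!cpuid)
			return NULL;
		cpuid->nent = nent;
		if (!ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid))
			return cpuid;	/* ->nent now holds the real entry count */
		free(cpuid);
		if (errno != E2BIG)
			return NULL;
		nent *= 2;		/* buffer too small: grow and try again */
	}
}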
This forces us to get_cpu() before -	 * issuing the first command, and also to emulate this annoying behavior -	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */ -	case 2: { -		int t, times = entry->eax & 0xff; - -		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; -		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; -		for (t = 1; t < times && *nent < maxnent; ++t) { -			do_cpuid_1_ent(&entry[t], function, 0); -			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; -			++*nent; -		} -		break; -	} -	/* function 4 and 0xb have additional index. */ -	case 4: { -		int i, cache_type; - -		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -		/* read more entries until cache_type is zero */ -		for (i = 1; *nent < maxnent; ++i) { -			cache_type = entry[i - 1].eax & 0x1f; -			if (!cache_type) -				break; -			do_cpuid_1_ent(&entry[i], function, i); -			entry[i].flags |= -			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -			++*nent; -		} -		break; -	} -	case 0xb: { -		int i, level_type; - -		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -		/* read more entries until level_type is zero */ -		for (i = 1; *nent < maxnent; ++i) { -			level_type = entry[i - 1].ecx & 0xff00; -			if (!level_type) -				break; -			do_cpuid_1_ent(&entry[i], function, i); -			entry[i].flags |= -			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -			++*nent; -		} -		break; -	} -	case 0xd: { -		int i; - -		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -		for (i = 1; *nent < maxnent; ++i) { -			if (entry[i - 1].eax == 0 && i != 2) -				break; -			do_cpuid_1_ent(&entry[i], function, i); -			entry[i].flags |= -			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX; -			++*nent; -		} -		break; -	} -	case KVM_CPUID_SIGNATURE: { -		char signature[12] = "KVMKVMKVM\0\0"; -		u32 *sigptr = (u32 *)signature; -		entry->eax = 0; -		entry->ebx = sigptr[0]; -		entry->ecx = sigptr[1]; -		entry->edx = sigptr[2]; -		break; -	} -	case KVM_CPUID_FEATURES: -		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | -			     (1 << KVM_FEATURE_NOP_IO_DELAY) | -			     (1 << KVM_FEATURE_CLOCKSOURCE2) | -			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); -		entry->ebx = 0; -		entry->ecx = 0; -		entry->edx = 0; -		break; -	case 0x80000000: -		entry->eax = min(entry->eax, 0x8000001a); -		break; -	case 0x80000001: -		entry->edx &= kvm_supported_word1_x86_features; -		entry->ecx &= kvm_supported_word6_x86_features; -		break; -	} - -	kvm_x86_ops->set_supported_cpuid(function, entry); - -	put_cpu(); -} - -#undef F - -static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, -				     struct kvm_cpuid_entry2 __user *entries) -{ -	struct kvm_cpuid_entry2 *cpuid_entries; -	int limit, nent = 0, r = -E2BIG; -	u32 func; - -	if (cpuid->nent < 1) -		goto out; -	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) -		cpuid->nent = KVM_MAX_CPUID_ENTRIES; -	r = -ENOMEM; -	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); -	if (!cpuid_entries) -		goto out; - -	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent); -	limit = cpuid_entries[0].eax; -	for (func = 1; func <= limit && nent < cpuid->nent; ++func) -		do_cpuid_ent(&cpuid_entries[nent], func, 0, -			     &nent, cpuid->nent); -	r = -E2BIG; -	if (nent >= cpuid->nent) -		goto out_free; - -	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent); -	limit = cpuid_entries[nent - 1].eax; -	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) -		do_cpuid_ent(&cpuid_entries[nent], func, 0, -			     &nent, cpuid->nent); - - - -	r = -E2BIG; -	if (nent >= cpuid->nent) -		goto out_free; - -	
do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent, -		     cpuid->nent); - -	r = -E2BIG; -	if (nent >= cpuid->nent) -		goto out_free; - -	do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent, -		     cpuid->nent); - -	r = -E2BIG; -	if (nent >= cpuid->nent) -		goto out_free; - -	r = -EFAULT; -	if (copy_to_user(entries, cpuid_entries, -			 nent * sizeof(struct kvm_cpuid_entry2))) -		goto out_free; -	cpuid->nent = nent; -	r = 0; - -out_free: -	vfree(cpuid_entries); -out: -	return r; -} -  static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,  				    struct kvm_lapic_state *s)  { +	kvm_x86_ops->sync_pir_to_irr(vcpu);  	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);  	return 0; @@ -2441,8 +2851,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,  static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,  				    struct kvm_lapic_state *s)  { -	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); -	kvm_apic_post_state_restore(vcpu); +	kvm_apic_post_state_restore(vcpu, s);  	update_cr8_intercept(vcpu);  	return 0; @@ -2451,7 +2860,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,  static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,  				    struct kvm_interrupt *irq)  { -	if (irq->irq < 0 || irq->irq >= 256) +	if (irq->irq >= KVM_NR_INTERRUPTS)  		return -EINVAL;  	if (irqchip_in_kernel(vcpu->kvm))  		return -ENXIO; @@ -2527,9 +2936,6 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,  	if (mce->status & MCI_STATUS_UC) {  		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||  		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { -			printk(KERN_DEBUG "kvm: set_mce: " -			       "injects mce exception while " -			       "previous one is in progress!\n");  			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);  			return 0;  		} @@ -2555,6 +2961,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,  static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,  					       struct kvm_vcpu_events *events)  { +	process_nmi(vcpu);  	events->exception.injected =  		vcpu->arch.exception.pending &&  		!kvm_exception_is_soft(vcpu->arch.exception.nr); @@ -2572,14 +2979,13 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,  			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);  	events->nmi.injected = vcpu->arch.nmi_injected; -	events->nmi.pending = vcpu->arch.nmi_pending; +	events->nmi.pending = vcpu->arch.nmi_pending != 0;  	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);  	events->nmi.pad = 0; -	events->sipi_vector = vcpu->arch.sipi_vector; +	events->sipi_vector = 0; /* never valid when reporting to user space */  	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING -			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR  			 | KVM_VCPUEVENT_VALID_SHADOW);  	memset(&events->reserved, 0, sizeof(events->reserved));  } @@ -2592,6 +2998,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,  			      | KVM_VCPUEVENT_VALID_SHADOW))  		return -EINVAL; +	process_nmi(vcpu);  	vcpu->arch.exception.pending = events->exception.injected;  	vcpu->arch.exception.nr = events->exception.nr;  	vcpu->arch.exception.has_error_code = events->exception.has_error_code; @@ -2600,8 +3007,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,  	vcpu->arch.interrupt.pending = events->interrupt.injected;  	vcpu->arch.interrupt.nr = events->interrupt.nr;  	vcpu->arch.interrupt.soft = events->interrupt.soft; -	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm)) -		
kvm_pic_clear_isr_ack(vcpu->kvm);  	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)  		kvm_x86_ops->set_interrupt_shadow(vcpu,  						  events->interrupt.shadow); @@ -2611,8 +3016,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,  		vcpu->arch.nmi_pending = events->nmi.pending;  	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); -	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) -		vcpu->arch.sipi_vector = events->sipi_vector; +	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && +	    kvm_vcpu_has_lapic(vcpu)) +		vcpu->arch.apic->sipi_vector = events->sipi_vector;  	kvm_make_request(KVM_REQ_EVENT, vcpu); @@ -2622,8 +3028,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,  static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,  					     struct kvm_debugregs *dbgregs)  { +	unsigned long val; +  	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); -	dbgregs->dr6 = vcpu->arch.dr6; +	_kvm_get_dr(vcpu, 6, &val); +	dbgregs->dr6 = val;  	dbgregs->dr7 = vcpu->arch.dr7;  	dbgregs->flags = 0;  	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); @@ -2637,7 +3046,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,  	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));  	vcpu->arch.dr6 = dbgregs->dr6; +	kvm_update_dr6(vcpu);  	vcpu->arch.dr7 = dbgregs->dr7; +	kvm_update_dr7(vcpu);  	return 0;  } @@ -2645,11 +3056,13 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,  static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,  					 struct kvm_xsave *guest_xsave)  { -	if (cpu_has_xsave) +	if (cpu_has_xsave) {  		memcpy(guest_xsave->region,  			&vcpu->arch.guest_fpu.state->xsave, -			xstate_size); -	else { +			vcpu->arch.guest_xstate_size); +		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= +			vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; +	} else {  		memcpy(guest_xsave->region,  			&vcpu->arch.guest_fpu.state->fxsave,  			sizeof(struct i387_fxsave_struct)); @@ -2664,10 +3077,17 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,  	u64 xstate_bv =  		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; -	if (cpu_has_xsave) +	if (cpu_has_xsave) { +		/* +		 * Here we allow setting states that are not present in +		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility +		 * with old userspace. +		 */ +		if (xstate_bv & ~kvm_supported_xcr0()) +			return -EINVAL;  		memcpy(&vcpu->arch.guest_fpu.state->xsave, -			guest_xsave->region, xstate_size); -	else { +			guest_xsave->region, vcpu->arch.guest_xstate_size); +	} else {  		if (xstate_bv & ~XSTATE_FPSSE)  			return -EINVAL;  		memcpy(&vcpu->arch.guest_fpu.state->fxsave, @@ -2703,9 +3123,9 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,  	for (i = 0; i < guest_xcrs->nr_xcrs; i++)  		/* Only support XCR0 currently */ -		if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { +		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {  			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, -				guest_xcrs->xcrs[0].value); +				guest_xcrs->xcrs[i].value);  			break;  		}  	if (r) @@ -2713,6 +3133,21 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,  	return r;  } +/* + * kvm_set_guest_paused() indicates to the guest kernel that it has been + * stopped by the hypervisor.  This function will be called from the host only. + * EINVAL is returned when the host attempts to set the flag for a guest that + * does not support pv clocks. 
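/*
 * Illustrative sketch, not part of the patch: the comment above describes the
 * host-side contract; the matching userspace step is a single argument-less
 * vCPU ioctl issued just before resuming a vCPU that the VMM had stopped
 * (migration pause, debugger attach, and so on), so guest soft-lockup
 * watchdogs can forgive the lost wall-clock time.  Helper name is invented.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void mark_vcpu_was_paused(int vcpu_fd)
{
	/* EINVAL here means the guest never enabled a paravirtual clock. */
	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0)
		perror("KVM_KVMCLOCK_CTRL");
}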
+ */ +static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) +{ +	if (!vcpu->arch.pv_time_enabled) +		return -EINVAL; +	vcpu->arch.pvclock_set_guest_stopped_request = true; +	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); +	return 0; +} +  long kvm_arch_vcpu_ioctl(struct file *filp,  			 unsigned int ioctl, unsigned long arg)  { @@ -2750,17 +3185,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		r = -EINVAL;  		if (!vcpu->arch.apic)  			goto out; -		u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); -		r = -ENOMEM; -		if (!u.lapic) -			goto out; -		r = -EFAULT; -		if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state))) -			goto out; +		u.lapic = memdup_user(argp, sizeof(*u.lapic)); +		if (IS_ERR(u.lapic)) +			return PTR_ERR(u.lapic); +  		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_INTERRUPT: { @@ -2770,16 +3199,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		if (copy_from_user(&irq, argp, sizeof irq))  			goto out;  		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_NMI: {  		r = kvm_vcpu_ioctl_nmi(vcpu); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_SET_CPUID: { @@ -2790,8 +3213,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))  			goto out;  		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); -		if (r) -			goto out;  		break;  	}  	case KVM_SET_CPUID2: { @@ -2803,8 +3224,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  			goto out;  		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,  					      cpuid_arg->entries); -		if (r) -			goto out;  		break;  	}  	case KVM_GET_CPUID2: { @@ -2854,8 +3273,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		r = -EFAULT;  		if (copy_from_user(&va, argp, sizeof va))  			goto out; -		r = 0; -		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); +		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);  		break;  	}  	case KVM_X86_SETUP_MCE: { @@ -2935,14 +3353,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		break;  	}  	case KVM_SET_XSAVE: { -		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); -		r = -ENOMEM; -		if (!u.xsave) -			break; - -		r = -EFAULT; -		if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave))) -			break; +		u.xsave = memdup_user(argp, sizeof(*u.xsave)); +		if (IS_ERR(u.xsave)) +			return PTR_ERR(u.xsave);  		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);  		break; @@ -2963,19 +3376,38 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		break;  	}  	case KVM_SET_XCRS: { -		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); -		r = -ENOMEM; -		if (!u.xcrs) -			break; - -		r = -EFAULT; -		if (copy_from_user(u.xcrs, argp, -				   sizeof(struct kvm_xcrs))) -			break; +		u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); +		if (IS_ERR(u.xcrs)) +			return PTR_ERR(u.xcrs);  		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);  		break;  	} +	case KVM_SET_TSC_KHZ: { +		u32 user_tsc_khz; + +		r = -EINVAL; +		user_tsc_khz = (u32)arg; + +		if (user_tsc_khz >= kvm_max_guest_tsc_khz) +			goto out; + +		if (user_tsc_khz == 0) +			user_tsc_khz = tsc_khz; + +		kvm_set_tsc_khz(vcpu, user_tsc_khz); + +		r = 0; +		goto out; +	} +	case KVM_GET_TSC_KHZ: { +		r = vcpu->arch.virtual_tsc_khz; +		goto out; +	} +	case KVM_KVMCLOCK_CTRL: { +		r = kvm_set_guest_paused(vcpu); +		goto out; +	}  	default:  		r = -EINVAL;  	} @@ -2984,12 +3416,17 @@ out:  	return r;  } +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ +	return 
VM_FAULT_SIGBUS; +} +  static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)  {  	int ret;  	if (addr > (unsigned int)(-3 * PAGE_SIZE)) -		return -1; +		return -EINVAL;  	ret = kvm_x86_ops->set_tss_addr(kvm, addr);  	return ret;  } @@ -3008,12 +3445,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,  		return -EINVAL;  	mutex_lock(&kvm->slots_lock); -	spin_lock(&kvm->mmu_lock);  	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);  	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; -	spin_unlock(&kvm->mmu_lock);  	mutex_unlock(&kvm->slots_lock);  	return 0;  } @@ -3137,86 +3572,109 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,  	if (!kvm->arch.vpit)  		return -ENXIO;  	mutex_lock(&kvm->arch.vpit->pit_state.lock); -	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; +	kvm->arch.vpit->pit_state.reinject = control->pit_reinject;  	mutex_unlock(&kvm->arch.vpit->pit_state.lock);  	return 0;  } -/* - * Get (and clear) the dirty memory log for a memory slot. +/** + * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot + * @kvm: kvm instance + * @log: slot id and address to which we copy the log + * + * We need to keep it in mind that VCPU threads can write to the bitmap + * concurrently.  So, to avoid losing data, we keep the following order for + * each bit: + * + *   1. Take a snapshot of the bit and clear it if needed. + *   2. Write protect the corresponding page. + *   3. Flush TLB's if needed. + *   4. Copy the snapshot to the userspace. + * + * Between 2 and 3, the guest may write to the page using the remaining TLB + * entry.  This is not a problem because the page will be reported dirty at + * step 4 using the snapshot taken before and step 3 ensures that successive + * writes will be logged for the next call.   */ -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, -				      struct kvm_dirty_log *log) +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)  { -	int r, i; +	int r;  	struct kvm_memory_slot *memslot; -	unsigned long n; -	unsigned long is_dirty = 0; +	unsigned long n, i; +	unsigned long *dirty_bitmap; +	unsigned long *dirty_bitmap_buffer; +	bool is_dirty = false;  	mutex_lock(&kvm->slots_lock);  	r = -EINVAL; -	if (log->slot >= KVM_MEMORY_SLOTS) +	if (log->slot >= KVM_USER_MEM_SLOTS)  		goto out; -	memslot = &kvm->memslots->memslots[log->slot]; +	memslot = id_to_memslot(kvm->memslots, log->slot); + +	dirty_bitmap = memslot->dirty_bitmap;  	r = -ENOENT; -	if (!memslot->dirty_bitmap) +	if (!dirty_bitmap)  		goto out;  	n = kvm_dirty_bitmap_bytes(memslot); -	for (i = 0; !is_dirty && i < n/sizeof(long); i++) -		is_dirty = memslot->dirty_bitmap[i]; +	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); +	memset(dirty_bitmap_buffer, 0, n); -	/* If nothing is dirty, don't bother messing with page tables. 
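/*
 * Illustrative sketch, not part of the patch: the userspace view of the ioctl
 * whose locking rules are documented above.  The VMM hands in one bit per
 * page of the slot and receives the snapshot taken in step 1, with the
 * kernel-side bits already cleared for the next iteration.  Sizing the
 * bitmap for the slot is the caller's responsibility; names are invented.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, unsigned int slot,
			   void *bitmap, size_t bitmap_bytes)
{
	struct kvm_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;	/* filled with the harvested snapshot */

	memset(bitmap, 0, bitmap_bytes);
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}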
*/ -	if (is_dirty) { -		struct kvm_memslots *slots, *old_slots; -		unsigned long *dirty_bitmap; +	spin_lock(&kvm->mmu_lock); -		r = -ENOMEM; -		dirty_bitmap = vmalloc(n); -		if (!dirty_bitmap) -			goto out; -		memset(dirty_bitmap, 0, n); +	for (i = 0; i < n / sizeof(long); i++) { +		unsigned long mask; +		gfn_t offset; -		r = -ENOMEM; -		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); -		if (!slots) { -			vfree(dirty_bitmap); -			goto out; -		} -		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); -		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; +		if (!dirty_bitmap[i]) +			continue; -		old_slots = kvm->memslots; -		rcu_assign_pointer(kvm->memslots, slots); -		synchronize_srcu_expedited(&kvm->srcu); -		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; -		kfree(old_slots); +		is_dirty = true; -		spin_lock(&kvm->mmu_lock); -		kvm_mmu_slot_remove_write_access(kvm, log->slot); -		spin_unlock(&kvm->mmu_lock); +		mask = xchg(&dirty_bitmap[i], 0); +		dirty_bitmap_buffer[i] = mask; -		r = -EFAULT; -		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { -			vfree(dirty_bitmap); -			goto out; -		} -		vfree(dirty_bitmap); -	} else { -		r = -EFAULT; -		if (clear_user(log->dirty_bitmap, n)) -			goto out; +		offset = i * BITS_PER_LONG; +		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);  	} +	spin_unlock(&kvm->mmu_lock); + +	/* See the comments in kvm_mmu_slot_remove_write_access(). */ +	lockdep_assert_held(&kvm->slots_lock); + +	/* +	 * All the TLBs can be flushed out of mmu lock, see the comments in +	 * kvm_mmu_slot_remove_write_access(). +	 */ +	if (is_dirty) +		kvm_flush_remote_tlbs(kvm); + +	r = -EFAULT; +	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) +		goto out; +  	r = 0;  out:  	mutex_unlock(&kvm->slots_lock);  	return r;  } +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, +			bool line_status) +{ +	if (!irqchip_in_kernel(kvm)) +		return -ENXIO; + +	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, +					irq_event->irq, irq_event->level, +					line_status); +	return 0; +} +  long kvm_arch_vm_ioctl(struct file *filp,  		       unsigned int ioctl, unsigned long arg)  { @@ -3237,8 +3695,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  	switch (ioctl) {  	case KVM_SET_TSS_ADDR:  		r = kvm_vm_ioctl_set_tss_addr(kvm, arg); -		if (r < 0) -			goto out;  		break;  	case KVM_SET_IDENTITY_MAP_ADDR: {  		u64 ident_addr; @@ -3247,14 +3703,10 @@ long kvm_arch_vm_ioctl(struct file *filp,  		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))  			goto out;  		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); -		if (r < 0) -			goto out;  		break;  	}  	case KVM_SET_NR_MMU_PAGES:  		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); -		if (r) -			goto out;  		break;  	case KVM_GET_NR_MMU_PAGES:  		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); @@ -3266,13 +3718,22 @@ long kvm_arch_vm_ioctl(struct file *filp,  		r = -EEXIST;  		if (kvm->arch.vpic)  			goto create_irqchip_unlock; +		r = -EINVAL; +		if (atomic_read(&kvm->online_vcpus)) +			goto create_irqchip_unlock;  		r = -ENOMEM;  		vpic = kvm_create_pic(kvm);  		if (vpic) {  			r = kvm_ioapic_init(kvm);  			if (r) { +				mutex_lock(&kvm->slots_lock);  				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, -							  &vpic->dev); +							  &vpic->dev_master); +				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, +							  &vpic->dev_slave); +				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, +							  &vpic->dev_eclr); +				mutex_unlock(&kvm->slots_lock);  				
kfree(vpic);  				goto create_irqchip_unlock;  			} @@ -3283,10 +3744,12 @@ long kvm_arch_vm_ioctl(struct file *filp,  		smp_wmb();  		r = kvm_setup_default_irq_routing(kvm);  		if (r) { +			mutex_lock(&kvm->slots_lock);  			mutex_lock(&kvm->irq_lock);  			kvm_ioapic_destroy(kvm);  			kvm_destroy_pic(kvm);  			mutex_unlock(&kvm->irq_lock); +			mutex_unlock(&kvm->slots_lock);  		}  	create_irqchip_unlock:  		mutex_unlock(&kvm->lock); @@ -3312,39 +3775,16 @@ long kvm_arch_vm_ioctl(struct file *filp,  	create_pit_unlock:  		mutex_unlock(&kvm->slots_lock);  		break; -	case KVM_IRQ_LINE_STATUS: -	case KVM_IRQ_LINE: { -		struct kvm_irq_level irq_event; - -		r = -EFAULT; -		if (copy_from_user(&irq_event, argp, sizeof irq_event)) -			goto out; -		r = -ENXIO; -		if (irqchip_in_kernel(kvm)) { -			__s32 status; -			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, -					irq_event.irq, irq_event.level); -			if (ioctl == KVM_IRQ_LINE_STATUS) { -				r = -EFAULT; -				irq_event.status = status; -				if (copy_to_user(argp, &irq_event, -							sizeof irq_event)) -					goto out; -			} -			r = 0; -		} -		break; -	}  	case KVM_GET_IRQCHIP: {  		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */ -		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); +		struct kvm_irqchip *chip; -		r = -ENOMEM; -		if (!chip) +		chip = memdup_user(argp, sizeof(*chip)); +		if (IS_ERR(chip)) { +			r = PTR_ERR(chip);  			goto out; -		r = -EFAULT; -		if (copy_from_user(chip, argp, sizeof *chip)) -			goto get_irqchip_out; +		} +  		r = -ENXIO;  		if (!irqchip_in_kernel(kvm))  			goto get_irqchip_out; @@ -3357,20 +3797,18 @@ long kvm_arch_vm_ioctl(struct file *filp,  		r = 0;  	get_irqchip_out:  		kfree(chip); -		if (r) -			goto out;  		break;  	}  	case KVM_SET_IRQCHIP: {  		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */ -		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); +		struct kvm_irqchip *chip; -		r = -ENOMEM; -		if (!chip) +		chip = memdup_user(argp, sizeof(*chip)); +		if (IS_ERR(chip)) { +			r = PTR_ERR(chip);  			goto out; -		r = -EFAULT; -		if (copy_from_user(chip, argp, sizeof *chip)) -			goto set_irqchip_out; +		} +  		r = -ENXIO;  		if (!irqchip_in_kernel(kvm))  			goto set_irqchip_out; @@ -3380,8 +3818,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  		r = 0;  	set_irqchip_out:  		kfree(chip); -		if (r) -			goto out;  		break;  	}  	case KVM_GET_PIT: { @@ -3408,9 +3844,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  		if (!kvm->arch.vpit)  			goto out;  		r = kvm_vm_ioctl_set_pit(kvm, &u.ps); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_GET_PIT2: { @@ -3434,9 +3867,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  		if (!kvm->arch.vpit)  			goto out;  		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_REINJECT_CONTROL: { @@ -3445,9 +3875,6 @@ long kvm_arch_vm_ioctl(struct file *filp,  		if (copy_from_user(&control, argp, sizeof(control)))  			goto out;  		r = kvm_vm_ioctl_reinject(kvm, &control); -		if (r) -			goto out; -		r = 0;  		break;  	}  	case KVM_XEN_HVM_CONFIG: { @@ -3480,6 +3907,7 @@ long kvm_arch_vm_ioctl(struct file *filp,  		delta = user_ns.clock - now_ns;  		local_irq_enable();  		kvm->arch.kvmclock_offset = delta; +		kvm_gen_update_masterclock(kvm);  		break;  	}  	case KVM_GET_CLOCK: { @@ -3516,6 +3944,23 @@ static void kvm_init_msr_list(void)  	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {  		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)  			continue; + +		/* +		 * Even MSRs that are 
valid in the host may not be exposed +		 * to the guests in some cases.  We could work around this +		 * in VMX with the generic MSR save/load machinery, but it +		 * is not really worthwhile since it will really only +		 * happen with nested virtualization. +		 */ +		switch (msrs_to_save[i]) { +		case MSR_IA32_BNDCFGS: +			if (!kvm_x86_ops->mpx_supported()) +				continue; +			break; +		default: +			break; +		} +  		if (j < i)  			msrs_to_save[j] = msrs_to_save[i];  		j++; @@ -3526,20 +3971,43 @@ static void kvm_init_msr_list(void)  static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,  			   const void *v)  { -	if (vcpu->arch.apic && -	    !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v)) -		return 0; +	int handled = 0; +	int n; + +	do { +		n = min(len, 8); +		if (!(vcpu->arch.apic && +		      !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) +		    && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) +			break; +		handled += n; +		addr += n; +		len -= n; +		v += n; +	} while (len); -	return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); +	return handled;  }  static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)  { -	if (vcpu->arch.apic && -	    !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v)) -		return 0; +	int handled = 0; +	int n; + +	do { +		n = min(len, 8); +		if (!(vcpu->arch.apic && +		      !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) +		    && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) +			break; +		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); +		handled += n; +		addr += n; +		len -= n; +		v += n; +	} while (len); -	return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v); +	return handled;  }  static void kvm_set_segment(struct kvm_vcpu *vcpu, @@ -3554,71 +4022,66 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,  	kvm_x86_ops->get_segment(vcpu, var, seg);  } -static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) -{ -	return gpa; -} - -static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) +gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)  {  	gpa_t t_gpa; -	u32 error; +	struct x86_exception exception;  	BUG_ON(!mmu_is_nested(vcpu));  	/* NPT walks are always user-walks */  	access |= PFERR_USER_MASK; -	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error); -	if (t_gpa == UNMAPPED_GVA) -		vcpu->arch.fault.nested = true; +	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);  	return t_gpa;  } -gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, +			      struct x86_exception *exception)  {  	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; -	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); +	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  } - gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) + gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, +				struct x86_exception *exception)  {  	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0;  	access |= PFERR_FETCH_MASK; -	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); +	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  } -gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, +			       struct x86_exception *exception)  {  	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;  	access |= PFERR_WRITE_MASK; -	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error); +	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  }  /* uses this to access any guest's mapped memory without checking CPL */ -gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error) +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, +				struct x86_exception *exception)  { -	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error); +	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);  }  static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,  				      struct kvm_vcpu *vcpu, u32 access, -				      u32 *error) +				      struct x86_exception *exception)  {  	void *data = val;  	int r = X86EMUL_CONTINUE;  	while (bytes) {  		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, -							    error); +							    exception);  		unsigned offset = addr & (PAGE_SIZE-1);  		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);  		int ret; -		if (gpa == UNMAPPED_GVA) { -			r = X86EMUL_PROPAGATE_FAULT; -			goto out; -		} +		if (gpa == UNMAPPED_GVA) +			return X86EMUL_PROPAGATE_FAULT;  		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);  		if (ret < 0) {  			r = X86EMUL_IO_NEEDED; @@ -3634,48 +4097,57 @@ out:  }  /* used for instruction fetching */ -static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, -				struct kvm_vcpu *vcpu, u32 *error) +static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, +				gva_t addr, void *val, unsigned int bytes, +				struct x86_exception *exception)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; +  	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, -					  access | PFERR_FETCH_MASK, error); +					  access | PFERR_FETCH_MASK, +					  exception);  } -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, -			       struct kvm_vcpu *vcpu, u32 *error) +int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +			       gva_t addr, void *val, unsigned int bytes, +			       struct x86_exception *exception)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; +  	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, -					  error); +					  exception);  } +EXPORT_SYMBOL_GPL(kvm_read_guest_virt); -static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, -			       struct kvm_vcpu *vcpu, u32 *error) +static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, +				      gva_t addr, void *val, unsigned int bytes, +				      struct x86_exception *exception)  { -	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error); +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);  } -static int kvm_write_guest_virt_system(gva_t addr, void *val, +int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, +				       gva_t addr, void *val,  				       unsigned int bytes, -				       struct kvm_vcpu *vcpu, -				       u32 *error) +				       struct x86_exception *exception)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	void *data = val;  	int r = X86EMUL_CONTINUE;  	while (bytes) {  		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,  							     PFERR_WRITE_MASK, -							     error); +							     exception);  		unsigned offset = addr & (PAGE_SIZE-1);  		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);  		int ret; -		if (gpa == UNMAPPED_GVA) { -			r = X86EMUL_PROPAGATE_FAULT; -			goto out; -		} +		if (gpa == UNMAPPED_GVA) +			return X86EMUL_PROPAGATE_FAULT;  		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);  		if (ret < 0) {  			r = X86EMUL_IO_NEEDED; @@ -3689,127 +4161,237 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,  out:  	return r;  } +EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); -static int emulator_read_emulated(unsigned long addr, -				  void *val, -				  unsigned int bytes, -				  unsigned int *error_code, -				  struct kvm_vcpu *vcpu) +static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, +				gpa_t *gpa, struct x86_exception *exception, +				bool write)  { -	gpa_t                 gpa; +	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) +		| (write ? PFERR_WRITE_MASK : 0); -	if (vcpu->mmio_read_completed) { -		memcpy(val, vcpu->mmio_data, bytes); -		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, -			       vcpu->mmio_phys_addr, *(u64 *)val); -		vcpu->mmio_read_completed = 0; -		return X86EMUL_CONTINUE; +	if (vcpu_match_mmio_gva(vcpu, gva) +	    && !permission_fault(vcpu, vcpu->arch.walk_mmu, +				 vcpu->arch.access, access)) { +		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | +					(gva & (PAGE_SIZE - 1)); +		trace_vcpu_match_mmio(gva, *gpa, write, false); +		return 1;  	} -	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code); +	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); -	if (gpa == UNMAPPED_GVA) -		return X86EMUL_PROPAGATE_FAULT; +	if (*gpa == UNMAPPED_GVA) +		return -1;  	/* For APIC access vmexit */ -	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) -		goto mmio; - -	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL) -				== X86EMUL_CONTINUE) -		return X86EMUL_CONTINUE; +	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) +		return 1; -mmio: -	/* -	 * Is this MMIO handled locally? 
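/*
 * Illustrative sketch, not part of the patch: the reworked vcpu_mmio_read()
 * and vcpu_mmio_write() earlier in this change no longer return a plain
 * hit/miss.  They chop an access into pieces of at most 8 bytes and report
 * how many bytes the in-kernel devices accepted, so a partially handled
 * access can be completed through the MMIO exit path.  Minimal model of that
 * loop; device_write() is a made-up stand-in for the APIC/io-bus probes.
 */
static int mmio_write_chunked(unsigned long addr, const void *v, int len,
			      int (*device_write)(unsigned long addr,
						  const void *data, int n))
{
	int handled = 0;

	while (len) {
		int n = len < 8 ? len : 8;

		if (device_write(addr, v, n))	/* nonzero: no device claimed it */
			break;
		handled += n;
		addr += n;
		len -= n;
		v = (const char *)v + n;
	}
	return handled;	/* less than the original len: finish in userspace */
}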
-	 */ -	if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) { -		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val); -		return X86EMUL_CONTINUE; +	if (vcpu_match_mmio_gpa(vcpu, *gpa)) { +		trace_vcpu_match_mmio(gva, *gpa, write, true); +		return 1;  	} -	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); - -	vcpu->mmio_needed = 1; -	vcpu->run->exit_reason = KVM_EXIT_MMIO; -	vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; -	vcpu->run->mmio.len = vcpu->mmio_size = bytes; -	vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0; - -	return X86EMUL_IO_NEEDED; +	return 0;  }  int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, -			  const void *val, int bytes) +			const void *val, int bytes)  {  	int ret;  	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);  	if (ret < 0)  		return 0; -	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1); +	kvm_mmu_pte_write(vcpu, gpa, val, bytes);  	return 1;  } -static int emulator_write_emulated_onepage(unsigned long addr, -					   const void *val, -					   unsigned int bytes, -					   unsigned int *error_code, -					   struct kvm_vcpu *vcpu) +struct read_write_emulator_ops { +	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, +				  int bytes); +	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, +				  void *val, int bytes); +	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, +			       int bytes, void *val); +	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, +				    void *val, int bytes); +	bool write; +}; + +static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) +{ +	if (vcpu->mmio_read_completed) { +		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, +			       vcpu->mmio_fragments[0].gpa, *(u64 *)val); +		vcpu->mmio_read_completed = 0; +		return 1; +	} + +	return 0; +} + +static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, +			void *val, int bytes)  { -	gpa_t                 gpa; +	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); +} -	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code); +static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, +			 void *val, int bytes) +{ +	return emulator_write_phys(vcpu, gpa, val, bytes); +} -	if (gpa == UNMAPPED_GVA) +static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) +{ +	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); +	return vcpu_mmio_write(vcpu, gpa, bytes, val); +} + +static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, +			  void *val, int bytes) +{ +	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); +	return X86EMUL_IO_NEEDED; +} + +static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, +			   void *val, int bytes) +{ +	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; + +	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); +	return X86EMUL_CONTINUE; +} + +static const struct read_write_emulator_ops read_emultor = { +	.read_write_prepare = read_prepare, +	.read_write_emulate = read_emulate, +	.read_write_mmio = vcpu_mmio_read, +	.read_write_exit_mmio = read_exit_mmio, +}; + +static const struct read_write_emulator_ops write_emultor = { +	.read_write_emulate = write_emulate, +	.read_write_mmio = write_mmio, +	.read_write_exit_mmio = write_exit_mmio, +	.write = true, +}; + +static int emulator_read_write_onepage(unsigned long addr, void *val, +				       unsigned int bytes, +				       struct x86_exception *exception, +				       struct kvm_vcpu *vcpu, +				       const struct read_write_emulator_ops *ops) +{ +	gpa_t gpa; +	int handled, ret; +	
bool write = ops->write; +	struct kvm_mmio_fragment *frag; + +	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); + +	if (ret < 0)  		return X86EMUL_PROPAGATE_FAULT;  	/* For APIC access vmexit */ -	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) +	if (ret)  		goto mmio; -	if (emulator_write_phys(vcpu, gpa, val, bytes)) +	if (ops->read_write_emulate(vcpu, gpa, val, bytes))  		return X86EMUL_CONTINUE;  mmio: -	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);  	/*  	 * Is this MMIO handled locally?  	 */ -	if (!vcpu_mmio_write(vcpu, gpa, bytes, val)) +	handled = ops->read_write_mmio(vcpu, gpa, bytes, val); +	if (handled == bytes)  		return X86EMUL_CONTINUE; -	vcpu->mmio_needed = 1; -	vcpu->run->exit_reason = KVM_EXIT_MMIO; -	vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; -	vcpu->run->mmio.len = vcpu->mmio_size = bytes; -	vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1; -	memcpy(vcpu->run->mmio.data, val, bytes); +	gpa += handled; +	bytes -= handled; +	val += handled; +	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); +	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; +	frag->gpa = gpa; +	frag->data = val; +	frag->len = bytes;  	return X86EMUL_CONTINUE;  } -int emulator_write_emulated(unsigned long addr, -			    const void *val, -			    unsigned int bytes, -			    unsigned int *error_code, -			    struct kvm_vcpu *vcpu) +int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, +			void *val, unsigned int bytes, +			struct x86_exception *exception, +			const struct read_write_emulator_ops *ops)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +	gpa_t gpa; +	int rc; + +	if (ops->read_write_prepare && +		  ops->read_write_prepare(vcpu, val, bytes)) +		return X86EMUL_CONTINUE; + +	vcpu->mmio_nr_fragments = 0; +  	/* Crossing a page boundary? 
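/*
 * Illustrative sketch, not part of the patch: the page-boundary test that
 * continues just below uses ((addr + bytes - 1) ^ addr) & PAGE_MASK to detect
 * a crossing access and "-addr & ~PAGE_MASK" to compute how many bytes remain
 * in the first page.  Tiny worked example, assuming 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1ff9;	/* 7 bytes before the 0x2000 boundary */
	unsigned int bytes = 16;

	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		unsigned int now = -addr & ~PAGE_MASK;	/* bytes left in page 1 */

		/* prints: split: 7 bytes at 0x1ff9, 9 bytes at 0x2000 */
		printf("split: %u bytes at %#lx, %u bytes at %#lx\n",
		       now, addr, bytes - now, addr + now);
	}
	return 0;
}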
*/  	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { -		int rc, now; +		int now;  		now = -addr & ~PAGE_MASK; -		rc = emulator_write_emulated_onepage(addr, val, now, error_code, -						     vcpu); +		rc = emulator_read_write_onepage(addr, val, now, exception, +						 vcpu, ops); +  		if (rc != X86EMUL_CONTINUE)  			return rc;  		addr += now;  		val += now;  		bytes -= now;  	} -	return emulator_write_emulated_onepage(addr, val, bytes, error_code, -					       vcpu); + +	rc = emulator_read_write_onepage(addr, val, bytes, exception, +					 vcpu, ops); +	if (rc != X86EMUL_CONTINUE) +		return rc; + +	if (!vcpu->mmio_nr_fragments) +		return rc; + +	gpa = vcpu->mmio_fragments[0].gpa; + +	vcpu->mmio_needed = 1; +	vcpu->mmio_cur_fragment = 0; + +	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); +	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; +	vcpu->run->exit_reason = KVM_EXIT_MMIO; +	vcpu->run->mmio.phys_addr = gpa; + +	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); +} + +static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, +				  unsigned long addr, +				  void *val, +				  unsigned int bytes, +				  struct x86_exception *exception) +{ +	return emulator_read_write(ctxt, addr, val, bytes, +				   exception, &read_emultor); +} + +int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, +			    unsigned long addr, +			    const void *val, +			    unsigned int bytes, +			    struct x86_exception *exception) +{ +	return emulator_read_write(ctxt, addr, (void *)val, bytes, +				   exception, &write_emultor);  }  #define CMPXCHG_TYPE(t, ptr, old, new) \ @@ -3822,13 +4404,14 @@ int emulator_write_emulated(unsigned long addr,  	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))  #endif -static int emulator_cmpxchg_emulated(unsigned long addr, +static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, +				     unsigned long addr,  				     const void *old,  				     const void *new,  				     unsigned int bytes, -				     unsigned int *error_code, -				     struct kvm_vcpu *vcpu) +				     struct x86_exception *exception)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	gpa_t gpa;  	struct page *page;  	char *kaddr; @@ -3848,12 +4431,10 @@ static int emulator_cmpxchg_emulated(unsigned long addr,  		goto emul_write;  	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); -	if (is_error_page(page)) { -		kvm_release_page_clean(page); +	if (is_error_page(page))  		goto emul_write; -	} -	kaddr = kmap_atomic(page, KM_USER0); +	kaddr = kmap_atomic(page);  	kaddr += offset_in_page(gpa);  	switch (bytes) {  	case 1: @@ -3871,20 +4452,21 @@ static int emulator_cmpxchg_emulated(unsigned long addr,  	default:  		BUG();  	} -	kunmap_atomic(kaddr, KM_USER0); +	kunmap_atomic(kaddr);  	kvm_release_page_dirty(page);  	if (!exchanged)  		return X86EMUL_CMPXCHG_FAILED; -	kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1); +	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); +	kvm_mmu_pte_write(vcpu, gpa, new, bytes);  	return X86EMUL_CONTINUE;  emul_write:  	printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); -	return emulator_write_emulated(addr, new, bytes, error_code, vcpu); +	return emulator_write_emulated(ctxt, addr, new, bytes, exception);  }  static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) @@ -3902,29 +4484,22 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)  	return r;  } - -static int emulator_pio_in_emulated(int size, unsigned short port, void *val, -			     unsigned int count, struct kvm_vcpu *vcpu) +static int 
emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, +			       unsigned short port, void *val, +			       unsigned int count, bool in)  { -	if (vcpu->arch.pio.count) -		goto data_avail; - -	trace_kvm_pio(0, port, size, 1); -  	vcpu->arch.pio.port = port; -	vcpu->arch.pio.in = 1; +	vcpu->arch.pio.in = in;  	vcpu->arch.pio.count  = count;  	vcpu->arch.pio.size = size;  	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { -	data_avail: -		memcpy(val, vcpu->arch.pio_data, size * count);  		vcpu->arch.pio.count = 0;  		return 1;  	}  	vcpu->run->exit_reason = KVM_EXIT_IO; -	vcpu->run->io.direction = KVM_EXIT_IO_IN; +	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;  	vcpu->run->io.size = size;  	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;  	vcpu->run->io.count = count; @@ -3933,43 +4508,47 @@ static int emulator_pio_in_emulated(int size, unsigned short port, void *val,  	return 0;  } -static int emulator_pio_out_emulated(int size, unsigned short port, -			      const void *val, unsigned int count, -			      struct kvm_vcpu *vcpu) +static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, +				    int size, unsigned short port, void *val, +				    unsigned int count)  { -	trace_kvm_pio(1, port, size, 1); - -	vcpu->arch.pio.port = port; -	vcpu->arch.pio.in = 0; -	vcpu->arch.pio.count = count; -	vcpu->arch.pio.size = size; +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +	int ret; -	memcpy(vcpu->arch.pio_data, val, size * count); +	if (vcpu->arch.pio.count) +		goto data_avail; -	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { +	ret = emulator_pio_in_out(vcpu, size, port, val, count, true); +	if (ret) { +data_avail: +		memcpy(val, vcpu->arch.pio_data, size * count); +		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);  		vcpu->arch.pio.count = 0;  		return 1;  	} -	vcpu->run->exit_reason = KVM_EXIT_IO; -	vcpu->run->io.direction = KVM_EXIT_IO_OUT; -	vcpu->run->io.size = size; -	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; -	vcpu->run->io.count = count; -	vcpu->run->io.port = port; -  	return 0;  } +static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, +				     int size, unsigned short port, +				     const void *val, unsigned int count) +{ +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); + +	memcpy(vcpu->arch.pio_data, val, size * count); +	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); +	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); +} +  static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)  {  	return kvm_x86_ops->get_segment_base(vcpu, seg);  } -int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) +static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)  { -	kvm_mmu_invlpg(vcpu, address); -	return X86EMUL_CONTINUE; +	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);  }  int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) @@ -3978,33 +4557,33 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)  		return X86EMUL_CONTINUE;  	if (kvm_x86_ops->has_wbinvd_exit()) { -		preempt_disable(); +		int cpu = get_cpu(); + +		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);  		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,  				wbinvd_ipi, NULL, 1); -		preempt_enable(); +		put_cpu();  		cpumask_clear(vcpu->arch.wbinvd_dirty_mask); -	} -	wbinvd(); +	} else +		wbinvd();  	return X86EMUL_CONTINUE;  }  EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); -int emulate_clts(struct kvm_vcpu *vcpu) +static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)  { -	
kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); -	kvm_x86_ops->fpu_activate(vcpu); -	return X86EMUL_CONTINUE; +	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));  } -int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu) +int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)  { -	return _kvm_get_dr(vcpu, dr, dest); +	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);  } -int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu) +int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)  { -	return __kvm_set_dr(vcpu, dr, value); +	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);  }  static u64 mk_cr_64(u64 curr_cr, u32 new_val) @@ -4012,8 +4591,9 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)  	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;  } -static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu) +static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	unsigned long value;  	switch (cr) { @@ -4024,7 +4604,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)  		value = vcpu->arch.cr2;  		break;  	case 3: -		value = vcpu->arch.cr3; +		value = kvm_read_cr3(vcpu);  		break;  	case 4:  		value = kvm_read_cr4(vcpu); @@ -4033,15 +4613,16 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)  		value = kvm_get_cr8(vcpu);  		break;  	default: -		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); +		kvm_err("%s: unexpected cr %u\n", __func__, cr);  		return 0;  	}  	return value;  } -static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu) +static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	int res = 0;  	switch (cr) { @@ -4058,51 +4639,69 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)  		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));  		break;  	case 8: -		res = __kvm_set_cr8(vcpu, val & 0xfUL); +		res = kvm_set_cr8(vcpu, val);  		break;  	default: -		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); +		kvm_err("%s: unexpected cr %u\n", __func__, cr);  		res = -1;  	}  	return res;  } -static int emulator_get_cpl(struct kvm_vcpu *vcpu) +static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) +{ +	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); +} + +static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) +{ +	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); +} + +static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)  { -	return kvm_x86_ops->get_cpl(vcpu); +	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);  } -static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) +static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)  { -	kvm_x86_ops->get_gdt(vcpu, dt); +	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);  } -static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu) +static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)  { -	kvm_x86_ops->get_idt(vcpu, dt); +	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);  } -static unsigned long emulator_get_cached_segment_base(int seg, -						      struct kvm_vcpu *vcpu) +static unsigned long emulator_get_cached_segment_base( +	struct x86_emulate_ctxt *ctxt, int seg)  { -	return get_segment_base(vcpu, seg); +	return get_segment_base(emul_to_vcpu(ctxt), seg);  } 
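The hunks above switch the emulator callbacks from taking a struct kvm_vcpu * to taking only the struct x86_emulate_ctxt *, with each hook recovering its vcpu through emul_to_vcpu(). As a minimal stand-alone sketch of that pattern (a context embedded in a larger object, with the owner recovered via an offsetof()-based lookup), the demo below uses hypothetical demo_vcpu, emu_ctxt, ctxt_to_vcpu and demo_get_cr3 names that are not part of the kernel sources:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for x86_emulate_ctxt and kvm_vcpu. */
struct emu_ctxt {
	unsigned long eip;
};

struct demo_vcpu {
	int id;
	unsigned long cr3;
	struct emu_ctxt ctxt;		/* embedded context handed to the hooks */
};

static struct demo_vcpu *ctxt_to_vcpu(struct emu_ctxt *ctxt)
{
	return container_of(ctxt, struct demo_vcpu, ctxt);
}

/* A hook in the style of emulator_get_cr(ctxt, 3): no vcpu argument. */
static unsigned long demo_get_cr3(struct emu_ctxt *ctxt)
{
	return ctxt_to_vcpu(ctxt)->cr3;
}

int main(void)
{
	struct demo_vcpu vcpu = { .id = 0, .cr3 = 0x1000, .ctxt = { .eip = 0 } };

	/* The emulator side only ever sees &vcpu.ctxt, never &vcpu itself. */
	printf("vcpu %d: cr3 = %#lx\n", vcpu.id, demo_get_cr3(&vcpu.ctxt));
	return 0;
}

Keeping the vcpu out of the callback signatures means the emulator core only needs the context pointer plus the ops table, which is the design the converted emulate_ops entries later in this diff rely on.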
-static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg, -					   struct kvm_vcpu *vcpu) +static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, +				 struct desc_struct *desc, u32 *base3, +				 int seg)  {  	struct kvm_segment var; -	kvm_get_segment(vcpu, &var, seg); +	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); +	*selector = var.selector; -	if (var.unusable) +	if (var.unusable) { +		memset(desc, 0, sizeof(*desc));  		return false; +	}  	if (var.g)  		var.limit >>= 12;  	set_desc_limit(desc, var.limit);  	set_desc_base(desc, (unsigned long)var.base); +#ifdef CONFIG_X86_64 +	if (base3) +		*base3 = var.base >> 32; +#endif  	desc->type = var.type;  	desc->s = var.s;  	desc->dpl = var.dpl; @@ -4115,15 +4714,18 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,  	return true;  } -static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg, -					   struct kvm_vcpu *vcpu) +static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, +				 struct desc_struct *desc, u32 base3, +				 int seg)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	struct kvm_segment var; -	/* needed to preserve selector */ -	kvm_get_segment(vcpu, &var, seg); - +	var.selector = selector;  	var.base = get_desc_base(desc); +#ifdef CONFIG_X86_64 +	var.base |= ((u64)base3) << 32; +#endif  	var.limit = get_desc_limit(desc);  	if (desc->g)  		var.limit = (var.limit << 12) | 0xfff; @@ -4143,57 +4745,109 @@ static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,  	return;  } -static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu) +static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, +			    u32 msr_index, u64 *pdata) +{ +	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); +} + +static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, +			    u32 msr_index, u64 data) +{ +	struct msr_data msr; + +	msr.data = data; +	msr.index = msr_index; +	msr.host_initiated = false; +	return kvm_set_msr(emul_to_vcpu(ctxt), &msr); +} + +static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, +			     u32 pmc, u64 *pdata) +{ +	return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); +} + +static void emulator_halt(struct x86_emulate_ctxt *ctxt) +{ +	emul_to_vcpu(ctxt)->arch.halt_request = 1; +} + +static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) +{ +	preempt_disable(); +	kvm_load_guest_fpu(emul_to_vcpu(ctxt)); +	/* +	 * CR0.TS may reference the host fpu state, not the guest fpu state, +	 * so it may be clear at this point. 
+	 */ +	clts(); +} + +static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)  { -	struct kvm_segment kvm_seg; +	preempt_enable(); +} + +static int emulator_intercept(struct x86_emulate_ctxt *ctxt, +			      struct x86_instruction_info *info, +			      enum x86_intercept_stage stage) +{ +	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); +} -	kvm_get_segment(vcpu, &kvm_seg, seg); -	return kvm_seg.selector; +static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, +			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) +{ +	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);  } -static void emulator_set_segment_selector(u16 sel, int seg, -					  struct kvm_vcpu *vcpu) +static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)  { -	struct kvm_segment kvm_seg; +	return kvm_register_read(emul_to_vcpu(ctxt), reg); +} -	kvm_get_segment(vcpu, &kvm_seg, seg); -	kvm_seg.selector = sel; -	kvm_set_segment(vcpu, &kvm_seg, seg); +static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) +{ +	kvm_register_write(emul_to_vcpu(ctxt), reg, val);  } -static struct x86_emulate_ops emulate_ops = { +static const struct x86_emulate_ops emulate_ops = { +	.read_gpr            = emulator_read_gpr, +	.write_gpr           = emulator_write_gpr,  	.read_std            = kvm_read_guest_virt_system,  	.write_std           = kvm_write_guest_virt_system,  	.fetch               = kvm_fetch_guest_virt,  	.read_emulated       = emulator_read_emulated,  	.write_emulated      = emulator_write_emulated,  	.cmpxchg_emulated    = emulator_cmpxchg_emulated, +	.invlpg              = emulator_invlpg,  	.pio_in_emulated     = emulator_pio_in_emulated,  	.pio_out_emulated    = emulator_pio_out_emulated, -	.get_cached_descriptor = emulator_get_cached_descriptor, -	.set_cached_descriptor = emulator_set_cached_descriptor, -	.get_segment_selector = emulator_get_segment_selector, -	.set_segment_selector = emulator_set_segment_selector, +	.get_segment         = emulator_get_segment, +	.set_segment         = emulator_set_segment,  	.get_cached_segment_base = emulator_get_cached_segment_base,  	.get_gdt             = emulator_get_gdt,  	.get_idt	     = emulator_get_idt, +	.set_gdt             = emulator_set_gdt, +	.set_idt	     = emulator_set_idt,  	.get_cr              = emulator_get_cr,  	.set_cr              = emulator_set_cr,  	.cpl                 = emulator_get_cpl,  	.get_dr              = emulator_get_dr,  	.set_dr              = emulator_set_dr, -	.set_msr             = kvm_set_msr, -	.get_msr             = kvm_get_msr, +	.set_msr             = emulator_set_msr, +	.get_msr             = emulator_get_msr, +	.read_pmc            = emulator_read_pmc, +	.halt                = emulator_halt, +	.wbinvd              = emulator_wbinvd, +	.fix_hypercall       = emulator_fix_hypercall, +	.get_fpu             = emulator_get_fpu, +	.put_fpu             = emulator_put_fpu, +	.intercept           = emulator_intercept, +	.get_cpuid           = emulator_get_cpuid,  }; -static void cache_all_regs(struct kvm_vcpu *vcpu) -{ -	kvm_register_read(vcpu, VCPU_REGS_RAX); -	kvm_register_read(vcpu, VCPU_REGS_RSP); -	kvm_register_read(vcpu, VCPU_REGS_RIP); -	vcpu->arch.regs_dirty = ~0; -} -  static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)  {  	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); @@ -4211,58 +4865,69 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)  static void inject_emulated_exception(struct kvm_vcpu *vcpu)  {  	struct 
x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; -	if (ctxt->exception == PF_VECTOR) -		kvm_propagate_fault(vcpu); -	else if (ctxt->error_code_valid) -		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code); +	if (ctxt->exception.vector == PF_VECTOR) +		kvm_propagate_fault(vcpu, &ctxt->exception); +	else if (ctxt->exception.error_code_valid) +		kvm_queue_exception_e(vcpu, ctxt->exception.vector, +				      ctxt->exception.error_code);  	else -		kvm_queue_exception(vcpu, ctxt->exception); +		kvm_queue_exception(vcpu, ctxt->exception.vector); +} + +static void init_decode_cache(struct x86_emulate_ctxt *ctxt) +{ +	memset(&ctxt->opcode_len, 0, +	       (void *)&ctxt->_regs - (void *)&ctxt->opcode_len); + +	ctxt->fetch.start = 0; +	ctxt->fetch.end = 0; +	ctxt->io_read.pos = 0; +	ctxt->io_read.end = 0; +	ctxt->mem_read.pos = 0; +	ctxt->mem_read.end = 0;  }  static void init_emulate_ctxt(struct kvm_vcpu *vcpu)  { -	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; +	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;  	int cs_db, cs_l; -	cache_all_regs(vcpu); -  	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); -	vcpu->arch.emulate_ctxt.vcpu = vcpu; -	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); -	vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu); -	vcpu->arch.emulate_ctxt.mode = -		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : -		(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) -		? X86EMUL_MODE_VM86 : cs_l -		? X86EMUL_MODE_PROT64 :	cs_db -		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; -	memset(c, 0, sizeof(struct decode_cache)); -	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); +	ctxt->eflags = kvm_get_rflags(vcpu); +	ctxt->eip = kvm_rip_read(vcpu); +	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL : +		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 : +		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 : +		     cs_db				? 
X86EMUL_MODE_PROT32 : +							  X86EMUL_MODE_PROT16; +	ctxt->guest_mode = is_guest_mode(vcpu); + +	init_decode_cache(ctxt); +	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;  } -int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq) +int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)  { -	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; +	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;  	int ret;  	init_emulate_ctxt(vcpu); -	vcpu->arch.emulate_ctxt.decode.op_bytes = 2; -	vcpu->arch.emulate_ctxt.decode.ad_bytes = 2; -	vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip; -	ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq); +	ctxt->op_bytes = 2; +	ctxt->ad_bytes = 2; +	ctxt->_eip = ctxt->eip + inc_eip; +	ret = emulate_int_real(ctxt, irq);  	if (ret != X86EMUL_CONTINUE)  		return EMULATE_FAIL; -	vcpu->arch.emulate_ctxt.eip = c->eip; -	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); -	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); -	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); +	ctxt->eip = ctxt->_eip; +	kvm_rip_write(vcpu, ctxt->eip); +	kvm_set_rflags(vcpu, ctxt->eflags);  	if (irq == NMI_VECTOR) -		vcpu->arch.nmi_pending = false; +		vcpu->arch.nmi_pending = 0;  	else  		vcpu->arch.interrupt.pending = false; @@ -4272,101 +4937,275 @@ EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);  static int handle_emulation_failure(struct kvm_vcpu *vcpu)  { +	int r = EMULATE_DONE; +  	++vcpu->stat.insn_emulation_fail;  	trace_kvm_emulate_insn_failed(vcpu); -	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; -	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; -	vcpu->run->internal.ndata = 0; +	if (!is_guest_mode(vcpu)) { +		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; +		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; +		vcpu->run->internal.ndata = 0; +		r = EMULATE_FAIL; +	}  	kvm_queue_exception(vcpu, UD_VECTOR); -	return EMULATE_FAIL; + +	return r;  } -static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva) +static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, +				  bool write_fault_to_shadow_pgtable, +				  int emulation_type)  { -	gpa_t gpa; +	gpa_t gpa = cr2; +	pfn_t pfn; -	if (tdp_enabled) +	if (emulation_type & EMULTYPE_NO_REEXECUTE)  		return false; +	if (!vcpu->arch.mmu.direct_map) { +		/* +		 * Write permission should be allowed since only +		 * write access need to be emulated. +		 */ +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); + +		/* +		 * If the mapping is invalid in guest, let cpu retry +		 * it to generate fault. +		 */ +		if (gpa == UNMAPPED_GVA) +			return true; +	} + +	/* +	 * Do not retry the unhandleable instruction if it faults on the +	 * readonly host memory, otherwise it will goto a infinite loop: +	 * retry instruction -> write #PF -> emulation fail -> retry +	 * instruction -> ... +	 */ +	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); + +	/* +	 * If the instruction failed on the error pfn, it can not be fixed, +	 * report the error to userspace. +	 */ +	if (is_error_noslot_pfn(pfn)) +		return false; + +	kvm_release_pfn_clean(pfn); + +	/* The instructions are well-emulated on direct mmu. 
*/ +	if (vcpu->arch.mmu.direct_map) { +		unsigned int indirect_shadow_pages; + +		spin_lock(&vcpu->kvm->mmu_lock); +		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; +		spin_unlock(&vcpu->kvm->mmu_lock); + +		if (indirect_shadow_pages) +			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + +		return true; +	} +  	/*  	 * if emulation was due to access to shadowed page table -	 * and it failed try to unshadow page and re-entetr the +	 * and it failed try to unshadow page and re-enter the  	 * guest to let CPU execute the instruction.  	 */ -	if (kvm_mmu_unprotect_page_virt(vcpu, gva)) -		return true; +	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); -	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL); +	/* +	 * If the access faults on its page table, it can not +	 * be fixed by unprotecting shadow page and it should +	 * be reported to userspace. +	 */ +	return !write_fault_to_shadow_pgtable; +} -	if (gpa == UNMAPPED_GVA) -		return true; /* let cpu generate fault */ +static bool retry_instruction(struct x86_emulate_ctxt *ctxt, +			      unsigned long cr2,  int emulation_type) +{ +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +	unsigned long last_retry_eip, last_retry_addr, gpa = cr2; -	if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT))) -		return true; +	last_retry_eip = vcpu->arch.last_retry_eip; +	last_retry_addr = vcpu->arch.last_retry_addr; + +	/* +	 * If the emulation is caused by #PF and it is non-page_table +	 * writing instruction, it means the VM-EXIT is caused by shadow +	 * page protected, we can zap the shadow page and retry this +	 * instruction directly. +	 * +	 * Note: if the guest uses a non-page-table modifying instruction +	 * on the PDE that points to the instruction, then we will unmap +	 * the instruction and go to an infinite loop. So, we cache the +	 * last retried eip and the last fault address, if we meet the eip +	 * and the address again, we can break out of the potential infinite +	 * loop. +	 */ +	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; + +	if (!(emulation_type & EMULTYPE_RETRY)) +		return false; + +	if (x86_page_table_writing_insn(ctxt)) +		return false; + +	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) +		return false; + +	vcpu->arch.last_retry_eip = ctxt->eip; +	vcpu->arch.last_retry_addr = cr2; + +	if (!vcpu->arch.mmu.direct_map) +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); + +	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + +	return true; +} + +static int complete_emulated_mmio(struct kvm_vcpu *vcpu); +static int complete_emulated_pio(struct kvm_vcpu *vcpu); + +static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, +				unsigned long *db) +{ +	u32 dr6 = 0; +	int i; +	u32 enable, rwlen; + +	enable = dr7; +	rwlen = dr7 >> 16; +	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) +		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) +			dr6 |= (1 << i); +	return dr6; +} + +static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) +{ +	struct kvm_run *kvm_run = vcpu->run; + +	/* +	 * Use the "raw" value to see if TF was passed to the processor. +	 * Note that the new value of the flags has not been saved yet. +	 * +	 * This is correct even for TF set by the guest, because "the +	 * processor will not generate this exception after the instruction +	 * that sets the TF flag". 
+	 */ +	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); + +	if (unlikely(rflags & X86_EFLAGS_TF)) { +		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { +			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; +			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; +			kvm_run->debug.arch.exception = DB_VECTOR; +			kvm_run->exit_reason = KVM_EXIT_DEBUG; +			*r = EMULATE_USER_EXIT; +		} else { +			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; +			/* +			 * "Certain debug exceptions may clear bit 0-3.  The +			 * remaining contents of the DR6 register are never +			 * cleared by the processor". +			 */ +			vcpu->arch.dr6 &= ~15; +			vcpu->arch.dr6 |= DR6_BS; +			kvm_queue_exception(vcpu, DB_VECTOR); +		} +	} +} + +static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) +{ +	struct kvm_run *kvm_run = vcpu->run; +	unsigned long eip = vcpu->arch.emulate_ctxt.eip; +	u32 dr6 = 0; + +	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && +	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { +		dr6 = kvm_vcpu_check_hw_bp(eip, 0, +					   vcpu->arch.guest_debug_dr7, +					   vcpu->arch.eff_db); + +		if (dr6 != 0) { +			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; +			kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + +				get_segment_base(vcpu, VCPU_SREG_CS); + +			kvm_run->debug.arch.exception = DB_VECTOR; +			kvm_run->exit_reason = KVM_EXIT_DEBUG; +			*r = EMULATE_USER_EXIT; +			return true; +		} +	} + +	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { +		dr6 = kvm_vcpu_check_hw_bp(eip, 0, +					   vcpu->arch.dr7, +					   vcpu->arch.db); + +		if (dr6 != 0) { +			vcpu->arch.dr6 &= ~15; +			vcpu->arch.dr6 |= dr6; +			kvm_queue_exception(vcpu, DB_VECTOR); +			*r = EMULATE_DONE; +			return true; +		} +	}  	return false;  } -int emulate_instruction(struct kvm_vcpu *vcpu, -			unsigned long cr2, -			u16 error_code, -			int emulation_type) +int x86_emulate_instruction(struct kvm_vcpu *vcpu, +			    unsigned long cr2, +			    int emulation_type, +			    void *insn, +			    int insn_len)  {  	int r; -	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; +	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; +	bool writeback = true; +	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; -	kvm_clear_exception_queue(vcpu); -	vcpu->arch.mmio_fault_cr2 = cr2;  	/* -	 * TODO: fix emulate.c to use guest_read/write_register -	 * instead of direct ->regs accesses, can save hundred cycles -	 * on Intel for instructions that don't read/change RSP, for -	 * for example. +	 * Clear write_fault_to_shadow_pgtable here to ensure it is +	 * never reused.  	 */ -	cache_all_regs(vcpu); +	vcpu->arch.write_fault_to_shadow_pgtable = false; +	kvm_clear_exception_queue(vcpu);  	if (!(emulation_type & EMULTYPE_NO_DECODE)) {  		init_emulate_ctxt(vcpu); -		vcpu->arch.emulate_ctxt.interruptibility = 0; -		vcpu->arch.emulate_ctxt.exception = -1; -		vcpu->arch.emulate_ctxt.perm_ok = false; -		r = x86_decode_insn(&vcpu->arch.emulate_ctxt); -		if (r == X86EMUL_PROPAGATE_FAULT) -			goto done; +		/* +		 * We will reenter on the same instruction since +		 * we do not set complete_userspace_io.  This does not +		 * handle watchpoints yet, those would be handled in +		 * the emulate_ops. 
+		 */ +		if (kvm_vcpu_check_breakpoint(vcpu, &r)) +			return r; -		trace_kvm_emulate_insn_start(vcpu); +		ctxt->interruptibility = 0; +		ctxt->have_exception = false; +		ctxt->perm_ok = false; -		/* Only allow emulation of specific instructions on #UD -		 * (namely VMMCALL, sysenter, sysexit, syscall)*/ -		if (emulation_type & EMULTYPE_TRAP_UD) { -			if (!c->twobyte) -				return EMULATE_FAIL; -			switch (c->b) { -			case 0x01: /* VMMCALL */ -				if (c->modrm_mod != 3 || c->modrm_rm != 1) -					return EMULATE_FAIL; -				break; -			case 0x34: /* sysenter */ -			case 0x35: /* sysexit */ -				if (c->modrm_mod != 0 || c->modrm_rm != 0) -					return EMULATE_FAIL; -				break; -			case 0x05: /* syscall */ -				if (c->modrm_mod != 0 || c->modrm_rm != 0) -					return EMULATE_FAIL; -				break; -			default: -				return EMULATE_FAIL; -			} +		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; -			if (!(c->modrm_reg == 0 || c->modrm_reg == 3)) -				return EMULATE_FAIL; -		} +		r = x86_decode_insn(ctxt, insn, insn_len); +		trace_kvm_emulate_insn_start(vcpu);  		++vcpu->stat.insn_emulation; -		if (r)  { -			if (reexecute_instruction(vcpu, cr2)) +		if (r != EMULATION_OK)  { +			if (emulation_type & EMULTYPE_TRAP_UD) +				return EMULATE_FAIL; +			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, +						emulation_type))  				return EMULATE_DONE;  			if (emulation_type & EMULTYPE_SKIP)  				return EMULATE_FAIL; @@ -4375,55 +5214,76 @@ int emulate_instruction(struct kvm_vcpu *vcpu,  	}  	if (emulation_type & EMULTYPE_SKIP) { -		kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip); +		kvm_rip_write(vcpu, ctxt->_eip);  		return EMULATE_DONE;  	} -	/* this is needed for vmware backdor interface to work since it +	if (retry_instruction(ctxt, cr2, emulation_type)) +		return EMULATE_DONE; + +	/* this is needed for vmware backdoor interface to work since it  	   changes registers values  during IO operation */ -	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); +	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { +		vcpu->arch.emulate_regs_need_sync_from_vcpu = false; +		emulator_invalidate_register_cache(ctxt); +	}  restart: -	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt); +	r = x86_emulate_insn(ctxt); + +	if (r == EMULATION_INTERCEPTED) +		return EMULATE_DONE;  	if (r == EMULATION_FAILED) { -		if (reexecute_instruction(vcpu, cr2)) +		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, +					emulation_type))  			return EMULATE_DONE;  		return handle_emulation_failure(vcpu);  	} -done: -	if (vcpu->arch.emulate_ctxt.exception >= 0) { +	if (ctxt->have_exception) {  		inject_emulated_exception(vcpu);  		r = EMULATE_DONE;  	} else if (vcpu->arch.pio.count) { -		if (!vcpu->arch.pio.in) +		if (!vcpu->arch.pio.in) { +			/* FIXME: return into emulator if single-stepping.  
*/  			vcpu->arch.pio.count = 0; -		r = EMULATE_DO_MMIO; +		} else { +			writeback = false; +			vcpu->arch.complete_userspace_io = complete_emulated_pio; +		} +		r = EMULATE_USER_EXIT;  	} else if (vcpu->mmio_needed) { -		if (vcpu->mmio_is_write) -			vcpu->mmio_needed = 0; -		r = EMULATE_DO_MMIO; +		if (!vcpu->mmio_is_write) +			writeback = false; +		r = EMULATE_USER_EXIT; +		vcpu->arch.complete_userspace_io = complete_emulated_mmio;  	} else if (r == EMULATION_RESTART)  		goto restart;  	else  		r = EMULATE_DONE; -	toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility); -	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); -	kvm_make_request(KVM_REQ_EVENT, vcpu); -	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); -	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); +	if (writeback) { +		toggle_interruptibility(vcpu, ctxt->interruptibility); +		kvm_make_request(KVM_REQ_EVENT, vcpu); +		vcpu->arch.emulate_regs_need_sync_to_vcpu = false; +		kvm_rip_write(vcpu, ctxt->eip); +		if (r == EMULATE_DONE) +			kvm_vcpu_check_singlestep(vcpu, &r); +		kvm_set_rflags(vcpu, ctxt->eflags); +	} else +		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;  	return r;  } -EXPORT_SYMBOL_GPL(emulate_instruction); +EXPORT_SYMBOL_GPL(x86_emulate_instruction);  int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)  {  	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); -	int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu); +	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, +					    size, port, &val, 1);  	/* do not return to emulator after return from userspace */  	vcpu->arch.pio.count = 0;  	return ret; @@ -4432,7 +5292,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);  static void tsc_bad(void *info)  { -	__get_cpu_var(cpu_tsc_khz) = 0; +	__this_cpu_write(cpu_tsc_khz, 0);  }  static void tsc_khz_changed(void *data) @@ -4446,7 +5306,7 @@ static void tsc_khz_changed(void *data)  		khz = cpufreq_quick_get(raw_smp_processor_id());  	if (!khz)  		khz = tsc_khz; -	__get_cpu_var(cpu_tsc_khz) = khz; +	__this_cpu_write(cpu_tsc_khz, khz);  }  static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -4564,14 +5424,17 @@ static void kvm_timer_init(void)  	int cpu;  	max_tsc_khz = tsc_khz; -	register_hotcpu_notifier(&kvmclock_cpu_notifier_block); + +	cpu_notifier_register_begin();  	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {  #ifdef CONFIG_CPU_FREQ  		struct cpufreq_policy policy;  		memset(&policy, 0, sizeof(policy)); -		cpufreq_get_policy(&policy, get_cpu()); +		cpu = get_cpu(); +		cpufreq_get_policy(&policy, cpu);  		if (policy.cpuinfo.max_freq)  			max_tsc_khz = policy.cpuinfo.max_freq; +		put_cpu();  #endif  		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,  					  CPUFREQ_TRANSITION_NOTIFIER); @@ -4579,21 +5442,25 @@ static void kvm_timer_init(void)  	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);  	for_each_online_cpu(cpu)  		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); + +	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block); +	cpu_notifier_register_done(); +  }  static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); -static int kvm_is_in_guest(void) +int kvm_is_in_guest(void)  { -	return percpu_read(current_vcpu) != NULL; +	return __this_cpu_read(current_vcpu) != NULL;  }  static int kvm_is_user_mode(void)  {  	int user_mode = 3; -	if (percpu_read(current_vcpu)) -		user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu)); +	if (__this_cpu_read(current_vcpu)) +		user_mode = 
kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));  	return user_mode != 0;  } @@ -4602,8 +5469,8 @@ static unsigned long kvm_get_guest_ip(void)  {  	unsigned long ip = 0; -	if (percpu_read(current_vcpu)) -		ip = kvm_rip_read(percpu_read(current_vcpu)); +	if (__this_cpu_read(current_vcpu)) +		ip = kvm_rip_read(__this_cpu_read(current_vcpu));  	return ip;  } @@ -4616,20 +5483,94 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {  void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)  { -	percpu_write(current_vcpu, vcpu); +	__this_cpu_write(current_vcpu, vcpu);  }  EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)  { -	percpu_write(current_vcpu, NULL); +	__this_cpu_write(current_vcpu, NULL);  }  EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); +static void kvm_set_mmio_spte_mask(void) +{ +	u64 mask; +	int maxphyaddr = boot_cpu_data.x86_phys_bits; + +	/* +	 * Set the reserved bits and the present bit of an paging-structure +	 * entry to generate page fault with PFER.RSV = 1. +	 */ +	 /* Mask the reserved physical address bits. */ +	mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr; + +	/* Bit 62 is always reserved for 32bit host. */ +	mask |= 0x3ull << 62; + +	/* Set the present bit. */ +	mask |= 1ull; + +#ifdef CONFIG_X86_64 +	/* +	 * If reserved bit is not supported, clear the present bit to disable +	 * mmio page fault. +	 */ +	if (maxphyaddr == 52) +		mask &= ~1ull; +#endif + +	kvm_mmu_set_mmio_spte_mask(mask); +} + +#ifdef CONFIG_X86_64 +static void pvclock_gtod_update_fn(struct work_struct *work) +{ +	struct kvm *kvm; + +	struct kvm_vcpu *vcpu; +	int i; + +	spin_lock(&kvm_lock); +	list_for_each_entry(kvm, &vm_list, vm_list) +		kvm_for_each_vcpu(i, vcpu, kvm) +			set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); +	atomic_set(&kvm_guest_has_master_clock, 0); +	spin_unlock(&kvm_lock); +} + +static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); + +/* + * Notification about pvclock gtod data update. 
+ */ +static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, +			       void *priv) +{ +	struct pvclock_gtod_data *gtod = &pvclock_gtod_data; +	struct timekeeper *tk = priv; + +	update_pvclock_gtod(tk); + +	/* disable master clock if host does not trust, or does not +	 * use, TSC clocksource +	 */ +	if (gtod->clock.vclock_mode != VCLOCK_TSC && +	    atomic_read(&kvm_guest_has_master_clock) != 0) +		queue_work(system_long_wq, &pvclock_gtod_work); + +	return 0; +} + +static struct notifier_block pvclock_gtod_notifier = { +	.notifier_call = pvclock_gtod_notify, +}; +#endif +  int kvm_arch_init(void *opaque)  {  	int r; -	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; +	struct kvm_x86_ops *ops = opaque;  	if (kvm_x86_ops) {  		printk(KERN_ERR "kvm: already loaded the other module\n"); @@ -4648,15 +5589,22 @@ int kvm_arch_init(void *opaque)  		goto out;  	} +	r = -ENOMEM; +	shared_msrs = alloc_percpu(struct kvm_shared_msrs); +	if (!shared_msrs) { +		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); +		goto out; +	} +  	r = kvm_mmu_module_init();  	if (r) -		goto out; +		goto out_free_percpu; -	kvm_init_msr_list(); +	kvm_set_mmio_spte_mask();  	kvm_x86_ops = ops; -	kvm_mmu_set_nonpresent_ptes(0ull, 0ull); -	kvm_mmu_set_base_ptes(PT_PRESENT_MASK); +	kvm_init_msr_list(); +  	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,  			PT_DIRTY_MASK, PT64_NX_MASK, 0); @@ -4667,8 +5615,15 @@ int kvm_arch_init(void *opaque)  	if (cpu_has_xsave)  		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); +	kvm_lapic_init(); +#ifdef CONFIG_X86_64 +	pvclock_gtod_register_notifier(&pvclock_gtod_notifier); +#endif +  	return 0; +out_free_percpu: +	free_percpu(shared_msrs);  out:  	return r;  } @@ -4681,8 +5636,12 @@ void kvm_arch_exit(void)  		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,  					    CPUFREQ_TRANSITION_NOTIFIER);  	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); +#ifdef CONFIG_X86_64 +	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); +#endif  	kvm_x86_ops = NULL;  	kvm_mmu_module_exit(); +	free_percpu(shared_msrs);  }  int kvm_emulate_halt(struct kvm_vcpu *vcpu) @@ -4698,15 +5657,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)  }  EXPORT_SYMBOL_GPL(kvm_emulate_halt); -static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0, -			   unsigned long a1) -{ -	if (is_long_mode(vcpu)) -		return a0; -	else -		return a0 | ((gpa_t)a1 << 32); -} -  int kvm_hv_hypercall(struct kvm_vcpu *vcpu)  {  	u64 param, ingpa, outgpa, ret; @@ -4769,6 +5719,23 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)  	return 1;  } +/* + * kvm_pv_kick_cpu_op:  Kick a vcpu. + * + * @apicid - apicid of vcpu to be kicked. 
+ */ +static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) +{ +	struct kvm_lapic_irq lapic_irq; + +	lapic_irq.shorthand = 0; +	lapic_irq.dest_mode = 0; +	lapic_irq.dest_id = apicid; + +	lapic_irq.delivery_mode = APIC_DM_REMRD; +	kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); +} +  int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)  {  	unsigned long nr, a0, a1, a2, a3, ret; @@ -4802,8 +5769,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)  	case KVM_HC_VAPIC_POLL_IRQ:  		ret = 0;  		break; -	case KVM_HC_MMU_OP: -		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret); +	case KVM_HC_KICK_CPU: +		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); +		ret = 0;  		break;  	default:  		ret = -KVM_ENOSYS; @@ -4816,137 +5784,17 @@ out:  }  EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); -int kvm_fix_hypercall(struct kvm_vcpu *vcpu) +static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)  { +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);  	char instruction[3];  	unsigned long rip = kvm_rip_read(vcpu); -	/* -	 * Blow out the MMU to ensure that no other VCPU has an active mapping -	 * to ensure that the updated hypercall appears atomically across all -	 * VCPUs. -	 */ -	kvm_mmu_zap_all(vcpu->kvm); -  	kvm_x86_ops->patch_hypercall(vcpu, instruction); -	return emulator_write_emulated(rip, instruction, 3, NULL, vcpu); -} - -void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) -{ -	struct desc_ptr dt = { limit, base }; - -	kvm_x86_ops->set_gdt(vcpu, &dt); -} - -void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) -{ -	struct desc_ptr dt = { limit, base }; - -	kvm_x86_ops->set_idt(vcpu, &dt); -} - -static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) -{ -	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; -	int j, nent = vcpu->arch.cpuid_nent; - -	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; -	/* when no next entry is found, the current entry[i] is reselected */ -	for (j = i + 1; ; j = (j + 1) % nent) { -		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; -		if (ej->function == e->function) { -			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; -			return j; -		} -	} -	return 0; /* silence gcc, even though control never reaches here */ +	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);  } -/* find an entry with matching function, matching index (if needed), and that - * should be read next (if it's stateful) */ -static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, -	u32 function, u32 index) -{ -	if (e->function != function) -		return 0; -	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) -		return 0; -	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && -	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) -		return 0; -	return 1; -} - -struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, -					      u32 function, u32 index) -{ -	int i; -	struct kvm_cpuid_entry2 *best = NULL; - -	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { -		struct kvm_cpuid_entry2 *e; - -		e = &vcpu->arch.cpuid_entries[i]; -		if (is_matching_cpuid_entry(e, function, index)) { -			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) -				move_to_next_stateful_cpuid_entry(vcpu, i); -			best = e; -			break; -		} -		/* -		 * Both basic or both extended? 
-		 */ -		if (((e->function ^ function) & 0x80000000) == 0) -			if (!best || e->function > best->function) -				best = e; -	} -	return best; -} -EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); - -int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) -{ -	struct kvm_cpuid_entry2 *best; - -	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); -	if (!best || best->eax < 0x80000008) -		goto not_found; -	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); -	if (best) -		return best->eax & 0xff; -not_found: -	return 36; -} - -void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) -{ -	u32 function, index; -	struct kvm_cpuid_entry2 *best; - -	function = kvm_register_read(vcpu, VCPU_REGS_RAX); -	index = kvm_register_read(vcpu, VCPU_REGS_RCX); -	kvm_register_write(vcpu, VCPU_REGS_RAX, 0); -	kvm_register_write(vcpu, VCPU_REGS_RBX, 0); -	kvm_register_write(vcpu, VCPU_REGS_RCX, 0); -	kvm_register_write(vcpu, VCPU_REGS_RDX, 0); -	best = kvm_find_cpuid_entry(vcpu, function, index); -	if (best) { -		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); -		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); -		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx); -		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); -	} -	kvm_x86_ops->skip_emulated_instruction(vcpu); -	trace_kvm_cpuid(function, -			kvm_register_read(vcpu, VCPU_REGS_RAX), -			kvm_register_read(vcpu, VCPU_REGS_RBX), -			kvm_register_read(vcpu, VCPU_REGS_RCX), -			kvm_register_read(vcpu, VCPU_REGS_RDX)); -} -EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); -  /*   * Check if userspace requested an interrupt window, and that the   * interrupt window is open. @@ -4976,33 +5824,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)  			!kvm_event_needs_reinjection(vcpu);  } -static void vapic_enter(struct kvm_vcpu *vcpu) -{ -	struct kvm_lapic *apic = vcpu->arch.apic; -	struct page *page; - -	if (!apic || !apic->vapic_addr) -		return; - -	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - -	vcpu->arch.apic->vapic_page = page; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ -	struct kvm_lapic *apic = vcpu->arch.apic; -	int idx; - -	if (!apic || !apic->vapic_addr) -		return; - -	idx = srcu_read_lock(&vcpu->kvm->srcu); -	kvm_release_page_dirty(apic->vapic_page); -	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); -	srcu_read_unlock(&vcpu->kvm->srcu, idx); -} -  static void update_cr8_intercept(struct kvm_vcpu *vcpu)  {  	int max_irr, tpr; @@ -5026,8 +5847,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)  	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);  } -static void inject_pending_event(struct kvm_vcpu *vcpu) +static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)  { +	int r; +  	/* try to reinject previous events if any */  	if (vcpu->arch.exception.pending) {  		trace_kvm_inj_exception(vcpu->arch.exception.nr, @@ -5037,65 +5860,108 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)  					  vcpu->arch.exception.has_error_code,  					  vcpu->arch.exception.error_code,  					  vcpu->arch.exception.reinject); -		return; +		return 0;  	}  	if (vcpu->arch.nmi_injected) {  		kvm_x86_ops->set_nmi(vcpu); -		return; +		return 0;  	}  	if (vcpu->arch.interrupt.pending) {  		kvm_x86_ops->set_irq(vcpu); -		return; +		return 0; +	} + +	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); +		if (r != 0) +			return r;  	}  	/* try to inject new event if pending */  	if (vcpu->arch.nmi_pending) {  		if (kvm_x86_ops->nmi_allowed(vcpu)) { -			
vcpu->arch.nmi_pending = false; +			--vcpu->arch.nmi_pending;  			vcpu->arch.nmi_injected = true;  			kvm_x86_ops->set_nmi(vcpu);  		} -	} else if (kvm_cpu_has_interrupt(vcpu)) { +	} else if (kvm_cpu_has_injectable_intr(vcpu)) { +		/* +		 * Because interrupts can be injected asynchronously, we are +		 * calling check_nested_events again here to avoid a race condition. +		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this +		 * proposal and current concerns.  Perhaps we should be setting +		 * KVM_REQ_EVENT only on certain events and not unconditionally? +		 */ +		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { +			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); +			if (r != 0) +				return r; +		}  		if (kvm_x86_ops->interrupt_allowed(vcpu)) {  			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),  					    false);  			kvm_x86_ops->set_irq(vcpu);  		}  	} +	return 0;  } -static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +static void process_nmi(struct kvm_vcpu *vcpu)  { -	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && -			!vcpu->guest_xcr0_loaded) { -		/* kvm_set_xcr() also depends on this */ -		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); -		vcpu->guest_xcr0_loaded = 1; -	} +	unsigned limit = 2; + +	/* +	 * x86 is limited to one NMI running, and one NMI pending after it. +	 * If an NMI is already in progress, limit further NMIs to just one. +	 * Otherwise, allow two (and we'll inject the first one immediately). +	 */ +	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) +		limit = 1; + +	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); +	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); +	kvm_make_request(KVM_REQ_EVENT, vcpu);  } -static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)  { -	if (vcpu->guest_xcr0_loaded) { -		if (vcpu->arch.xcr0 != host_xcr0) -			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); -		vcpu->guest_xcr0_loaded = 0; -	} +	u64 eoi_exit_bitmap[4]; +	u32 tmr[8]; + +	if (!kvm_apic_hw_enabled(vcpu->arch.apic)) +		return; + +	memset(eoi_exit_bitmap, 0, 32); +	memset(tmr, 0, 32); + +	kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); +	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); +	kvm_apic_update_tmr(vcpu, tmr);  } +/* + * Returns 1 to let __vcpu_run() continue the guest execution loop without + * exiting to the userspace.  Otherwise, the value will be returned to the + * userspace. + */  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  {  	int r;  	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&  		vcpu->run->request_interrupt_window; +	bool req_immediate_exit = false;  	if (vcpu->requests) {  		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))  			kvm_mmu_unload(vcpu);  		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))  			__kvm_migrate_timers(vcpu); +		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) +			kvm_gen_update_masterclock(vcpu->kvm); +		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) +			kvm_gen_kvmclock_update(vcpu);  		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {  			r = kvm_guest_time_update(vcpu);  			if (unlikely(r)) @@ -5119,27 +5985,57 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  			vcpu->fpu_active = 0;  			kvm_x86_ops->fpu_deactivate(vcpu);  		} +		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { +			/* Page is swapped out. 
Do synthetic halt */ +			vcpu->arch.apf.halted = true; +			r = 1; +			goto out; +		} +		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) +			record_steal_time(vcpu); +		if (kvm_check_request(KVM_REQ_NMI, vcpu)) +			process_nmi(vcpu); +		if (kvm_check_request(KVM_REQ_PMU, vcpu)) +			kvm_handle_pmu_event(vcpu); +		if (kvm_check_request(KVM_REQ_PMI, vcpu)) +			kvm_deliver_pmi(vcpu); +		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) +			vcpu_scan_ioapic(vcpu);  	} -	r = kvm_mmu_reload(vcpu); -	if (unlikely(r)) -		goto out; -  	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { -		inject_pending_event(vcpu); +		kvm_apic_accept_events(vcpu); +		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { +			r = 1; +			goto out; +		} +		if (inject_pending_event(vcpu, req_int_win) != 0) +			req_immediate_exit = true;  		/* enable NMI/IRQ window open exits if needed */ -		if (vcpu->arch.nmi_pending) +		else if (vcpu->arch.nmi_pending)  			kvm_x86_ops->enable_nmi_window(vcpu); -		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) +		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)  			kvm_x86_ops->enable_irq_window(vcpu);  		if (kvm_lapic_enabled(vcpu)) { +			/* +			 * Update architecture specific hints for APIC +			 * virtual interrupt delivery. +			 */ +			if (kvm_x86_ops->hwapic_irr_update) +				kvm_x86_ops->hwapic_irr_update(vcpu, +					kvm_lapic_find_highest_irr(vcpu));  			update_cr8_intercept(vcpu);  			kvm_lapic_sync_to_vapic(vcpu);  		}  	} +	r = kvm_mmu_reload(vcpu); +	if (unlikely(r)) { +		goto cancel_injection; +	} +  	preempt_disable();  	kvm_x86_ops->prepare_guest_switch(vcpu); @@ -5147,23 +6043,30 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  		kvm_load_guest_fpu(vcpu);  	kvm_load_guest_xcr0(vcpu); -	atomic_set(&vcpu->guest_mode, 1); -	smp_wmb(); +	vcpu->mode = IN_GUEST_MODE; + +	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + +	/* We should set ->mode before check ->requests, +	 * see the comment in make_all_cpus_request. +	 */ +	smp_mb__after_srcu_read_unlock();  	local_irq_disable(); -	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests +	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests  	    || need_resched() || signal_pending(current)) { -		atomic_set(&vcpu->guest_mode, 0); +		vcpu->mode = OUTSIDE_GUEST_MODE;  		smp_wmb();  		local_irq_enable();  		preempt_enable(); -		kvm_x86_ops->cancel_injection(vcpu); +		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);  		r = 1; -		goto out; +		goto cancel_injection;  	} -	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); +	if (req_immediate_exit) +		smp_send_reschedule(vcpu->cpu);  	kvm_guest_enter(); @@ -5173,12 +6076,28 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  		set_debugreg(vcpu->arch.eff_db[1], 1);  		set_debugreg(vcpu->arch.eff_db[2], 2);  		set_debugreg(vcpu->arch.eff_db[3], 3); +		set_debugreg(vcpu->arch.dr6, 6);  	}  	trace_kvm_entry(vcpu->vcpu_id);  	kvm_x86_ops->run(vcpu);  	/* +	 * Do this here before restoring debug registers on the host.  And +	 * since we do this before handling the vmexit, a DR access vmexit +	 * can (a) read the correct value of the debug registers, (b) set +	 * KVM_DEBUGREG_WONT_EXIT again. 
+	 */ +	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { +		int i; + +		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); +		kvm_x86_ops->sync_dirty_debug_regs(vcpu); +		for (i = 0; i < KVM_NR_DB_REGS; i++) +			vcpu->arch.eff_db[i] = vcpu->arch.db[i]; +	} + +	/*  	 * If the guest has used debug registers, at least dr7  	 * will be disabled while returning to the host.  	 * If we don't have active breakpoints in the host, we don't @@ -5188,11 +6107,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  	if (hw_breakpoint_active())  		hw_breakpoint_restore(); -	kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc); +	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, +							   native_read_tsc()); -	atomic_set(&vcpu->guest_mode, 0); +	vcpu->mode = OUTSIDE_GUEST_MODE;  	smp_wmb(); -	local_irq_enable(); + +	/* Interrupt is enabled by handle_external_intr() */ +	kvm_x86_ops->handle_external_intr(vcpu);  	++vcpu->stat.exits; @@ -5218,10 +6140,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)  		profile_hit(KVM_PROFILING, (void *)rip);  	} +	if (unlikely(vcpu->arch.tsc_always_catchup)) +		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); -	kvm_lapic_sync_from_vapic(vcpu); +	if (vcpu->arch.apic_attention) +		kvm_lapic_sync_from_vapic(vcpu);  	r = kvm_x86_ops->handle_exit(vcpu); +	return r; + +cancel_injection: +	kvm_x86_ops->cancel_injection(vcpu); +	if (unlikely(vcpu->arch.apic_attention)) +		kvm_lapic_sync_from_vapic(vcpu);  out:  	return r;  } @@ -5232,36 +6163,29 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)  	int r;  	struct kvm *kvm = vcpu->kvm; -	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { -		pr_debug("vcpu %d received sipi with vector # %x\n", -			 vcpu->vcpu_id, vcpu->arch.sipi_vector); -		kvm_lapic_reset(vcpu); -		r = kvm_arch_vcpu_reset(vcpu); -		if (r) -			return r; -		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; -	} -  	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); -	vapic_enter(vcpu);  	r = 1;  	while (r > 0) { -		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) +		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && +		    !vcpu->arch.apf.halted)  			r = vcpu_enter_guest(vcpu);  		else {  			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);  			kvm_vcpu_block(vcpu);  			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); -			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) -			{ +			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { +				kvm_apic_accept_events(vcpu);  				switch(vcpu->arch.mp_state) {  				case KVM_MP_STATE_HALTED: +					vcpu->arch.pv.pv_unhalted = false;  					vcpu->arch.mp_state =  						KVM_MP_STATE_RUNNABLE;  				case KVM_MP_STATE_RUNNABLE: +					vcpu->arch.apf.halted = false; +					break; +				case KVM_MP_STATE_INIT_RECEIVED:  					break; -				case KVM_MP_STATE_SIPI_RECEIVED:  				default:  					r = -EINTR;  					break; @@ -5281,6 +6205,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)  			vcpu->run->exit_reason = KVM_EXIT_INTR;  			++vcpu->stat.request_irq_exits;  		} + +		kvm_check_async_pf_completion(vcpu); +  		if (signal_pending(current)) {  			r = -EINTR;  			vcpu->run->exit_reason = KVM_EXIT_INTR; @@ -5288,54 +6215,133 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)  		}  		if (need_resched()) {  			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); -			kvm_resched(vcpu); +			cond_resched();  			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);  		}  	}  	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); -	vapic_exit(vcpu); -  	return r;  } +static inline int complete_emulated_io(struct kvm_vcpu *vcpu) +{ +	int r; +	
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); +	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); +	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); +	if (r != EMULATE_DONE) +		return 0; +	return 1; +} + +static int complete_emulated_pio(struct kvm_vcpu *vcpu) +{ +	BUG_ON(!vcpu->arch.pio.count); + +	return complete_emulated_io(vcpu); +} + +/* + * Implements the following, as a state machine: + * + * read: + *   for each fragment + *     for each mmio piece in the fragment + *       write gpa, len + *       exit + *       copy data + *   execute insn + * + * write: + *   for each fragment + *     for each mmio piece in the fragment + *       write gpa, len + *       copy data + *       exit + */ +static int complete_emulated_mmio(struct kvm_vcpu *vcpu) +{ +	struct kvm_run *run = vcpu->run; +	struct kvm_mmio_fragment *frag; +	unsigned len; + +	BUG_ON(!vcpu->mmio_needed); + +	/* Complete previous fragment */ +	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; +	len = min(8u, frag->len); +	if (!vcpu->mmio_is_write) +		memcpy(frag->data, run->mmio.data, len); + +	if (frag->len <= 8) { +		/* Switch to the next fragment. */ +		frag++; +		vcpu->mmio_cur_fragment++; +	} else { +		/* Go forward to the next mmio piece. */ +		frag->data += len; +		frag->gpa += len; +		frag->len -= len; +	} + +	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { +		vcpu->mmio_needed = 0; + +		/* FIXME: return into emulator if single-stepping.  */ +		if (vcpu->mmio_is_write) +			return 1; +		vcpu->mmio_read_completed = 1; +		return complete_emulated_io(vcpu); +	} + +	run->exit_reason = KVM_EXIT_MMIO; +	run->mmio.phys_addr = frag->gpa; +	if (vcpu->mmio_is_write) +		memcpy(run->mmio.data, frag->data, min(8u, frag->len)); +	run->mmio.len = min(8u, frag->len); +	run->mmio.is_write = vcpu->mmio_is_write; +	vcpu->arch.complete_userspace_io = complete_emulated_mmio; +	return 0; +} + +  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)  {  	int r;  	sigset_t sigsaved; +	if (!tsk_used_math(current) && init_fpu(current)) +		return -ENOMEM; +  	if (vcpu->sigset_active)  		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);  	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {  		kvm_vcpu_block(vcpu); +		kvm_apic_accept_events(vcpu);  		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);  		r = -EAGAIN;  		goto out;  	}  	/* re-sync apic's tpr */ -	if (!irqchip_in_kernel(vcpu->kvm)) -		kvm_set_cr8(vcpu, kvm_run->cr8); - -	if (vcpu->arch.pio.count || vcpu->mmio_needed) { -		if (vcpu->mmio_needed) { -			memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); -			vcpu->mmio_read_completed = 1; -			vcpu->mmio_needed = 0; -		} -		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); -		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE); -		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); -		if (r != EMULATE_DONE) { -			r = 0; +	if (!irqchip_in_kernel(vcpu->kvm)) { +		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { +			r = -EINVAL;  			goto out;  		}  	} -	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) -		kvm_register_write(vcpu, VCPU_REGS_RAX, -				     kvm_run->hypercall.ret); + +	if (unlikely(vcpu->arch.complete_userspace_io)) { +		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; +		vcpu->arch.complete_userspace_io = NULL; +		r = cui(vcpu); +		if (r <= 0) +			goto out; +	} else +		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);  	r = __vcpu_run(vcpu); @@ -5349,6 +6355,17 @@ out:  int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  { +	if 
(vcpu->arch.emulate_regs_need_sync_to_vcpu) { +		/* +		 * We are here if userspace calls get_regs() in the middle of +		 * instruction emulation. Registers state needs to be copied +		 * back from emulation context to vcpu. Userspace shouldn't do +		 * that usually, but some bad designed PV devices (vmware +		 * backdoor interface) need this to work +		 */ +		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); +		vcpu->arch.emulate_regs_need_sync_to_vcpu = false; +	}  	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);  	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);  	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); @@ -5376,6 +6393,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  { +	vcpu->arch.emulate_regs_need_sync_from_vcpu = true; +	vcpu->arch.emulate_regs_need_sync_to_vcpu = false; +  	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);  	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);  	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); @@ -5439,7 +6459,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,  	sregs->cr0 = kvm_read_cr0(vcpu);  	sregs->cr2 = vcpu->arch.cr2; -	sregs->cr3 = vcpu->arch.cr3; +	sregs->cr3 = kvm_read_cr3(vcpu);  	sregs->cr4 = kvm_read_cr4(vcpu);  	sregs->cr8 = kvm_get_cr8(vcpu);  	sregs->efer = vcpu->arch.efer; @@ -5457,36 +6477,48 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,  int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,  				    struct kvm_mp_state *mp_state)  { -	mp_state->mp_state = vcpu->arch.mp_state; +	kvm_apic_accept_events(vcpu); +	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && +					vcpu->arch.pv.pv_unhalted) +		mp_state->mp_state = KVM_MP_STATE_RUNNABLE; +	else +		mp_state->mp_state = vcpu->arch.mp_state; +  	return 0;  }  int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,  				    struct kvm_mp_state *mp_state)  { -	vcpu->arch.mp_state = mp_state->mp_state; +	if (!kvm_vcpu_has_lapic(vcpu) && +	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE) +		return -EINVAL; + +	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { +		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; +		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); +	} else +		vcpu->arch.mp_state = mp_state->mp_state;  	kvm_make_request(KVM_REQ_EVENT, vcpu);  	return 0;  } -int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, -		    bool has_error_code, u32 error_code) +int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, +		    int reason, bool has_error_code, u32 error_code)  { -	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; +	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;  	int ret;  	init_emulate_ctxt(vcpu); -	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, -				   tss_selector, reason, has_error_code, -				   error_code); +	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, +				   has_error_code, error_code);  	if (ret)  		return EMULATE_FAIL; -	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); -	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); -	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); +	kvm_rip_write(vcpu, ctxt->eip); +	kvm_set_rflags(vcpu, ctxt->eflags);  	kvm_make_request(KVM_REQ_EVENT, vcpu);  	return EMULATE_DONE;  } @@ -5495,10 +6527,14 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);  int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,  				  struct kvm_sregs *sregs)  { +	struct 
msr_data apic_base_msr;  	int mmu_reset_needed = 0; -	int pending_vec, max_bits; +	int pending_vec, max_bits, idx;  	struct desc_ptr dt; +	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) +		return -EINVAL; +  	dt.size = sregs->idt.limit;  	dt.address = sregs->idt.base;  	kvm_x86_ops->set_idt(vcpu, &dt); @@ -5507,14 +6543,17 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,  	kvm_x86_ops->set_gdt(vcpu, &dt);  	vcpu->arch.cr2 = sregs->cr2; -	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; +	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;  	vcpu->arch.cr3 = sregs->cr3; +	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);  	kvm_set_cr8(vcpu, sregs->cr8);  	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;  	kvm_x86_ops->set_efer(vcpu, sregs->efer); -	kvm_set_apic_base(vcpu, sregs->apic_base); +	apic_base_msr.data = sregs->apic_base; +	apic_base_msr.host_initiated = true; +	kvm_set_apic_base(vcpu, &apic_base_msr);  	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;  	kvm_x86_ops->set_cr0(vcpu, sregs->cr0); @@ -5522,22 +6561,25 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,  	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;  	kvm_x86_ops->set_cr4(vcpu, sregs->cr4); +	if (sregs->cr4 & X86_CR4_OSXSAVE) +		kvm_update_cpuid(vcpu); + +	idx = srcu_read_lock(&vcpu->kvm->srcu);  	if (!is_long_mode(vcpu) && is_pae(vcpu)) { -		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3); +		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));  		mmu_reset_needed = 1;  	} +	srcu_read_unlock(&vcpu->kvm->srcu, idx);  	if (mmu_reset_needed)  		kvm_mmu_reset_context(vcpu); -	max_bits = (sizeof sregs->interrupt_bitmap) << 3; +	max_bits = KVM_NR_INTERRUPTS;  	pending_vec = find_first_bit(  		(const unsigned long *)sregs->interrupt_bitmap, max_bits);  	if (pending_vec < max_bits) {  		kvm_queue_interrupt(vcpu, pending_vec, false);  		pr_debug("Set back pending irq %d\n", pending_vec); -		if (irqchip_in_kernel(vcpu->kvm)) -			kvm_pic_clear_isr_ack(vcpu->kvm);  	}  	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); @@ -5592,13 +6634,12 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,  	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {  		for (i = 0; i < KVM_NR_DB_REGS; ++i)  			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; -		vcpu->arch.switch_db_regs = -			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK); +		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];  	} else {  		for (i = 0; i < KVM_NR_DB_REGS; i++)  			vcpu->arch.eff_db[i] = vcpu->arch.db[i]; -		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);  	} +	kvm_update_dr7(vcpu);  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)  		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + @@ -5610,7 +6651,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,  	 */  	kvm_set_rflags(vcpu, rflags); -	kvm_x86_ops->set_guest_debug(vcpu, dbg); +	kvm_x86_ops->update_db_bp_intercept(vcpu);  	r = 0; @@ -5712,7 +6753,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)  	 */  	kvm_put_guest_xcr0(vcpu);  	vcpu->guest_fpu_loaded = 1; -	unlazy_fpu(current); +	__kernel_fpu_begin();  	fpu_restore_checking(&vcpu->arch.guest_fpu);  	trace_kvm_fpu(1);  } @@ -5726,6 +6767,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)  	vcpu->guest_fpu_loaded = 0;  	fpu_save_init(&vcpu->arch.guest_fpu); +	__kernel_fpu_end();  	++vcpu->stat.fpu_reload;  	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);  	trace_kvm_fpu(0); @@ -5733,10 +6775,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)  void 
kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)  { -	if (vcpu->arch.time_page) { -		kvm_release_page_dirty(vcpu->arch.time_page); -		vcpu->arch.time_page = NULL; -	} +	kvmclock_reset(vcpu);  	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);  	fx_free(vcpu); @@ -5758,23 +6797,44 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)  	int r;  	vcpu->arch.mtrr_state.have_fixed = 1; -	vcpu_load(vcpu); -	r = kvm_arch_vcpu_reset(vcpu); -	if (r == 0) -		r = kvm_mmu_setup(vcpu); +	r = vcpu_load(vcpu); +	if (r) +		return r; +	kvm_vcpu_reset(vcpu); +	kvm_mmu_setup(vcpu);  	vcpu_put(vcpu); -	if (r < 0) -		goto free_vcpu; -	return 0; -free_vcpu: -	kvm_x86_ops->vcpu_free(vcpu); +	return r; +} + +int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +	int r; +	struct msr_data msr; +	struct kvm *kvm = vcpu->kvm; + +	r = vcpu_load(vcpu); +	if (r) +		return r; +	msr.data = 0x0; +	msr.index = MSR_IA32_TSC; +	msr.host_initiated = true; +	kvm_write_tsc(vcpu, &msr); +	vcpu_put(vcpu); + +	schedule_delayed_work(&kvm->arch.kvmclock_sync_work, +					KVMCLOCK_SYNC_PERIOD); +  	return r;  }  void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)  { -	vcpu_load(vcpu); +	int r; +	vcpu->arch.apf.msr_val = 0; + +	r = vcpu_load(vcpu); +	BUG_ON(r);  	kvm_mmu_unload(vcpu);  	vcpu_put(vcpu); @@ -5782,19 +6842,46 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)  	kvm_x86_ops->vcpu_free(vcpu);  } -int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) +void kvm_vcpu_reset(struct kvm_vcpu *vcpu)  { -	vcpu->arch.nmi_pending = false; +	atomic_set(&vcpu->arch.nmi_queued, 0); +	vcpu->arch.nmi_pending = 0;  	vcpu->arch.nmi_injected = false; -	vcpu->arch.switch_db_regs = 0;  	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));  	vcpu->arch.dr6 = DR6_FIXED_1; +	kvm_update_dr6(vcpu);  	vcpu->arch.dr7 = DR7_FIXED_1; +	kvm_update_dr7(vcpu);  	kvm_make_request(KVM_REQ_EVENT, vcpu); +	vcpu->arch.apf.msr_val = 0; +	vcpu->arch.st.msr_val = 0; + +	kvmclock_reset(vcpu); + +	kvm_clear_async_pf_completion_queue(vcpu); +	kvm_async_pf_hash_reset(vcpu); +	vcpu->arch.apf.halted = false; + +	kvm_pmu_reset(vcpu); + +	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); +	vcpu->arch.regs_avail = ~0; +	vcpu->arch.regs_dirty = ~0; + +	kvm_x86_ops->vcpu_reset(vcpu); +} + +void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) +{ +	struct kvm_segment cs; -	return kvm_x86_ops->vcpu_reset(vcpu); +	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); +	cs.selector = vector << 8; +	cs.base = vector << 12; +	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); +	kvm_rip_write(vcpu, 0);  }  int kvm_arch_hardware_enable(void *garbage) @@ -5802,13 +6889,91 @@ int kvm_arch_hardware_enable(void *garbage)  	struct kvm *kvm;  	struct kvm_vcpu *vcpu;  	int i; +	int ret; +	u64 local_tsc; +	u64 max_tsc = 0; +	bool stable, backwards_tsc = false;  	kvm_shared_msr_cpu_online(); -	list_for_each_entry(kvm, &vm_list, vm_list) -		kvm_for_each_vcpu(i, vcpu, kvm) -			if (vcpu->cpu == smp_processor_id()) -				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); -	return kvm_x86_ops->hardware_enable(garbage); +	ret = kvm_x86_ops->hardware_enable(garbage); +	if (ret != 0) +		return ret; + +	local_tsc = native_read_tsc(); +	stable = !check_tsc_unstable(); +	list_for_each_entry(kvm, &vm_list, vm_list) { +		kvm_for_each_vcpu(i, vcpu, kvm) { +			if (!stable && vcpu->cpu == smp_processor_id()) +				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); +			if (stable && vcpu->arch.last_host_tsc > local_tsc) { +				backwards_tsc = true; +				if (vcpu->arch.last_host_tsc > max_tsc) +					max_tsc = vcpu->arch.last_host_tsc; 
+			} +		} +	} + +	/* +	 * Sometimes, even reliable TSCs go backwards.  This happens on +	 * platforms that reset TSC during suspend or hibernate actions, but +	 * maintain synchronization.  We must compensate.  Fortunately, we can +	 * detect that condition here, which happens early in CPU bringup, +	 * before any KVM threads can be running.  Unfortunately, we can't +	 * bring the TSCs fully up to date with real time, as we aren't yet far +	 * enough into CPU bringup that we know how much real time has actually +	 * elapsed; our helper function, get_kernel_ns() will be using boot +	 * variables that haven't been updated yet. +	 * +	 * So we simply find the maximum observed TSC above, then record the +	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded, +	 * the adjustment will be applied.  Note that we accumulate +	 * adjustments, in case multiple suspend cycles happen before some VCPU +	 * gets a chance to run again.  In the event that no KVM threads get a +	 * chance to run, we will miss the entire elapsed period, as we'll have +	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may +	 * loose cycle time.  This isn't too big a deal, since the loss will be +	 * uniform across all VCPUs (not to mention the scenario is extremely +	 * unlikely). It is possible that a second hibernate recovery happens +	 * much faster than a first, causing the observed TSC here to be +	 * smaller; this would require additional padding adjustment, which is +	 * why we set last_host_tsc to the local tsc observed here. +	 * +	 * N.B. - this code below runs only on platforms with reliable TSC, +	 * as that is the only way backwards_tsc is set above.  Also note +	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should +	 * have the same delta_cyc adjustment applied if backwards_tsc +	 * is detected.  Note further, this adjustment is only done once, +	 * as we reset last_host_tsc on all VCPUs to stop this from being +	 * called multiple times (one for each physical CPU bringup). +	 * +	 * Platforms with unreliable TSCs don't have to deal with this, they +	 * will be compensated by the logic in vcpu_load, which sets the TSC to +	 * catchup mode.  This will catchup all VCPUs to real time, but cannot +	 * guarantee that they stay in perfect synchronization. +	 */ +	if (backwards_tsc) { +		u64 delta_cyc = max_tsc - local_tsc; +		backwards_tsc_observed = true; +		list_for_each_entry(kvm, &vm_list, vm_list) { +			kvm_for_each_vcpu(i, vcpu, kvm) { +				vcpu->arch.tsc_offset_adjustment += delta_cyc; +				vcpu->arch.last_host_tsc = local_tsc; +				set_bit(KVM_REQ_MASTERCLOCK_UPDATE, +					&vcpu->requests); +			} + +			/* +			 * We have to disable TSC offset matching.. if you were +			 * booting a VM while issuing an S4 host suspend.... +			 * you may have some problem.  Solving this issue is +			 * left as an exercise to the reader. 
+			 */ +			kvm->arch.last_tsc_nsec = 0; +			kvm->arch.last_tsc_write = 0; +		} + +	} +	return 0;  }  void kvm_arch_hardware_disable(void *garbage) @@ -5832,6 +6997,13 @@ void kvm_arch_check_processor_compat(void *rtn)  	kvm_x86_ops->check_processor_compatibility(rtn);  } +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ +	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); +} + +struct static_key kvm_no_apic_vcpu __read_mostly; +  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  {  	struct page *page; @@ -5841,11 +7013,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  	BUG_ON(vcpu->kvm == NULL);  	kvm = vcpu->kvm; +	vcpu->arch.pv.pv_unhalted = false;  	vcpu->arch.emulate_ctxt.ops = &emulate_ops; -	vcpu->arch.walk_mmu = &vcpu->arch.mmu; -	vcpu->arch.mmu.root_hpa = INVALID_PAGE; -	vcpu->arch.mmu.translate_gpa = translate_gpa; -	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;  	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))  		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;  	else @@ -5858,8 +7027,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  	}  	vcpu->arch.pio_data = page_address(page); -	if (!kvm->arch.virtual_tsc_khz) -		kvm_arch_set_tsc_khz(kvm, max_tsc_khz); +	kvm_set_tsc_khz(vcpu, max_tsc_khz);  	r = kvm_mmu_create(vcpu);  	if (r < 0) @@ -5869,7 +7037,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  		r = kvm_create_lapic(vcpu);  		if (r < 0)  			goto fail_mmu_destroy; -	} +	} else +		static_key_slow_inc(&kvm_no_apic_vcpu);  	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,  				       GFP_KERNEL); @@ -5879,10 +7048,27 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  	}  	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; -	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) +	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { +		r = -ENOMEM;  		goto fail_free_mce_banks; +	} + +	r = fx_init(vcpu); +	if (r) +		goto fail_free_wbinvd_dirty_mask; + +	vcpu->arch.ia32_tsc_adjust_msr = 0x0; +	vcpu->arch.pv_time_enabled = false; + +	vcpu->arch.guest_supported_xcr0 = 0; +	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; + +	kvm_async_pf_hash_reset(vcpu); +	kvm_pmu_init(vcpu);  	return 0; +fail_free_wbinvd_dirty_mask: +	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);  fail_free_mce_banks:  	kfree(vcpu->arch.mce_banks);  fail_free_lapic: @@ -5899,35 +7085,50 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)  {  	int idx; +	kvm_pmu_destroy(vcpu);  	kfree(vcpu->arch.mce_banks);  	kvm_free_lapic(vcpu);  	idx = srcu_read_lock(&vcpu->kvm->srcu);  	kvm_mmu_destroy(vcpu);  	srcu_read_unlock(&vcpu->kvm->srcu, idx);  	free_page((unsigned long)vcpu->arch.pio_data); +	if (!irqchip_in_kernel(vcpu->kvm)) +		static_key_slow_dec(&kvm_no_apic_vcpu);  } -struct  kvm *kvm_arch_create_vm(void) +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  { -	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); - -	if (!kvm) -		return ERR_PTR(-ENOMEM); +	if (type) +		return -EINVAL;  	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); +	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);  	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); +	atomic_set(&kvm->arch.noncoherent_dma_count, 0);  	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */  	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); +	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ +	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, +		&kvm->arch.irq_sources_bitmap); + +	raw_spin_lock_init(&kvm->arch.tsc_write_lock); +	
mutex_init(&kvm->arch.apic_map_lock); +	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); + +	pvclock_update_vm_gtod_copy(kvm); -	spin_lock_init(&kvm->arch.tsc_write_lock); +	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); +	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); -	return kvm; +	return 0;  }  static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)  { -	vcpu_load(vcpu); +	int r; +	r = vcpu_load(vcpu); +	BUG_ON(r);  	kvm_mmu_unload(vcpu);  	vcpu_put(vcpu);  } @@ -5940,8 +7141,10 @@ static void kvm_free_vcpus(struct kvm *kvm)  	/*  	 * Unpin any mmu pages first.  	 */ -	kvm_for_each_vcpu(i, vcpu, kvm) +	kvm_for_each_vcpu(i, vcpu, kvm) { +		kvm_clear_async_pf_completion_queue(vcpu);  		kvm_unload_vcpu_mmu(vcpu); +	}  	kvm_for_each_vcpu(i, vcpu, kvm)  		kvm_arch_vcpu_free(vcpu); @@ -5955,125 +7158,226 @@ static void kvm_free_vcpus(struct kvm *kvm)  void kvm_arch_sync_events(struct kvm *kvm)  { +	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); +	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);  	kvm_free_all_assigned_devices(kvm);  	kvm_free_pit(kvm);  }  void kvm_arch_destroy_vm(struct kvm *kvm)  { +	if (current->mm == kvm->mm) { +		/* +		 * Free memory regions allocated on behalf of userspace, +		 * unless the the memory map has changed due to process exit +		 * or fd copying. +		 */ +		struct kvm_userspace_memory_region mem; +		memset(&mem, 0, sizeof(mem)); +		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; +		kvm_set_memory_region(kvm, &mem); + +		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; +		kvm_set_memory_region(kvm, &mem); + +		mem.slot = TSS_PRIVATE_MEMSLOT; +		kvm_set_memory_region(kvm, &mem); +	}  	kvm_iommu_unmap_guest(kvm);  	kfree(kvm->arch.vpic);  	kfree(kvm->arch.vioapic);  	kvm_free_vcpus(kvm); -	kvm_free_physmem(kvm);  	if (kvm->arch.apic_access_page)  		put_page(kvm->arch.apic_access_page);  	if (kvm->arch.ept_identity_pagetable)  		put_page(kvm->arch.ept_identity_pagetable); -	cleanup_srcu_struct(&kvm->srcu); -	kfree(kvm); +	kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); +} + +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, +			   struct kvm_memory_slot *dont) +{ +	int i; + +	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { +		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { +			kvm_kvfree(free->arch.rmap[i]); +			free->arch.rmap[i] = NULL; +		} +		if (i == 0) +			continue; + +		if (!dont || free->arch.lpage_info[i - 1] != +			     dont->arch.lpage_info[i - 1]) { +			kvm_kvfree(free->arch.lpage_info[i - 1]); +			free->arch.lpage_info[i - 1] = NULL; +		} +	} +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, +			    unsigned long npages) +{ +	int i; + +	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { +		unsigned long ugfn; +		int lpages; +		int level = i + 1; + +		lpages = gfn_to_index(slot->base_gfn + npages - 1, +				      slot->base_gfn, level) + 1; + +		slot->arch.rmap[i] = +			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); +		if (!slot->arch.rmap[i]) +			goto out_free; +		if (i == 0) +			continue; + +		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * +					sizeof(*slot->arch.lpage_info[i - 1])); +		if (!slot->arch.lpage_info[i - 1]) +			goto out_free; + +		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) +			slot->arch.lpage_info[i - 1][0].write_count = 1; +		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) +			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; +		ugfn = slot->userspace_addr >> PAGE_SHIFT; 
+		/* +		 * If the gfn and userspace address are not aligned wrt each +		 * other, or if explicitly asked to, disable large page +		 * support for this slot +		 */ +		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || +		    !kvm_largepages_enabled()) { +			unsigned long j; + +			for (j = 0; j < lpages; ++j) +				slot->arch.lpage_info[i - 1][j].write_count = 1; +		} +	} + +	return 0; + +out_free: +	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { +		kvm_kvfree(slot->arch.rmap[i]); +		slot->arch.rmap[i] = NULL; +		if (i == 0) +			continue; + +		kvm_kvfree(slot->arch.lpage_info[i - 1]); +		slot->arch.lpage_info[i - 1] = NULL; +	} +	return -ENOMEM; +} + +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +	/* +	 * memslots->generation has been incremented. +	 * mmio generation may have reached its maximum value. +	 */ +	kvm_mmu_invalidate_mmio_sptes(kvm);  }  int kvm_arch_prepare_memory_region(struct kvm *kvm,  				struct kvm_memory_slot *memslot, -				struct kvm_memory_slot old,  				struct kvm_userspace_memory_region *mem, -				int user_alloc) +				enum kvm_mr_change change)  { -	int npages = memslot->npages; -	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; +	/* +	 * Only private memory slots need to be mapped here since +	 * KVM_SET_MEMORY_REGION ioctl is no longer supported. +	 */ +	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { +		unsigned long userspace_addr; -	/* Prevent internal slot pages from being moved by fork()/COW. */ -	if (memslot->id >= KVM_MEMORY_SLOTS) -		map_flags = MAP_SHARED | MAP_ANONYMOUS; +		/* +		 * MAP_SHARED to prevent internal slot pages from being moved +		 * by fork()/COW. +		 */ +		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, +					 PROT_READ | PROT_WRITE, +					 MAP_SHARED | MAP_ANONYMOUS, 0); -	/*To keep backward compatibility with older userspace, -	 *x86 needs to hanlde !user_alloc case. 
-	 */ -	if (!user_alloc) { -		if (npages && !old.rmap) { -			unsigned long userspace_addr; - -			down_write(¤t->mm->mmap_sem); -			userspace_addr = do_mmap(NULL, 0, -						 npages * PAGE_SIZE, -						 PROT_READ | PROT_WRITE, -						 map_flags, -						 0); -			up_write(¤t->mm->mmap_sem); - -			if (IS_ERR((void *)userspace_addr)) -				return PTR_ERR((void *)userspace_addr); - -			memslot->userspace_addr = userspace_addr; -		} -	} +		if (IS_ERR((void *)userspace_addr)) +			return PTR_ERR((void *)userspace_addr); +		memslot->userspace_addr = userspace_addr; +	}  	return 0;  }  void kvm_arch_commit_memory_region(struct kvm *kvm,  				struct kvm_userspace_memory_region *mem, -				struct kvm_memory_slot old, -				int user_alloc) +				const struct kvm_memory_slot *old, +				enum kvm_mr_change change)  { -	int npages = mem->memory_size >> PAGE_SHIFT; +	int nr_mmu_pages = 0; -	if (!user_alloc && !old.user_alloc && old.rmap && !npages) { +	if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {  		int ret; -		down_write(¤t->mm->mmap_sem); -		ret = do_munmap(current->mm, old.userspace_addr, -				old.npages * PAGE_SIZE); -		up_write(¤t->mm->mmap_sem); +		ret = vm_munmap(old->userspace_addr, +				old->npages * PAGE_SIZE);  		if (ret < 0)  			printk(KERN_WARNING  			       "kvm_vm_ioctl_set_memory_region: "  			       "failed to munmap memory\n");  	} -	spin_lock(&kvm->mmu_lock); -	if (!kvm->arch.n_requested_mmu_pages) { -		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); +	if (!kvm->arch.n_requested_mmu_pages) +		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); + +	if (nr_mmu_pages)  		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); -	} +	/* +	 * Write protect all pages for dirty logging. +	 * +	 * All the sptes including the large sptes which point to this +	 * slot are set to readonly. We can not create any new large +	 * spte on this slot until the end of the logging. +	 * +	 * See the comments in fast_page_fault(). 
+	 */ +	if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) +		kvm_mmu_slot_remove_write_access(kvm, mem->slot); +} -	kvm_mmu_slot_remove_write_access(kvm, mem->slot); -	spin_unlock(&kvm->mmu_lock); +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +	kvm_mmu_invalidate_zap_all_pages(kvm);  } -void kvm_arch_flush_shadow(struct kvm *kvm) +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, +				   struct kvm_memory_slot *slot)  { -	kvm_mmu_zap_all(kvm); -	kvm_reload_remote_mmus(kvm); +	kvm_mmu_invalidate_zap_all_pages(kvm);  }  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)  { -	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE -		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED -		|| vcpu->arch.nmi_pending || +	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) +		kvm_x86_ops->check_nested_events(vcpu, false); + +	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && +		!vcpu->arch.apf.halted) +		|| !list_empty_careful(&vcpu->async_pf.done) +		|| kvm_apic_has_events(vcpu) +		|| vcpu->arch.pv.pv_unhalted +		|| atomic_read(&vcpu->arch.nmi_queued) ||  		(kvm_arch_interrupt_allowed(vcpu) &&  		 kvm_cpu_has_interrupt(vcpu));  } -void kvm_vcpu_kick(struct kvm_vcpu *vcpu) +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)  { -	int me; -	int cpu = vcpu->cpu; - -	if (waitqueue_active(&vcpu->wq)) { -		wake_up_interruptible(&vcpu->wq); -		++vcpu->stat.halt_wakeup; -	} - -	me = get_cpu(); -	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) -		if (atomic_xchg(&vcpu->guest_mode, 0)) -			smp_send_reschedule(cpu); -	put_cpu(); +	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;  }  int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) @@ -6111,6 +7415,166 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)  }  EXPORT_SYMBOL_GPL(kvm_set_rflags); +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) +{ +	int r; + +	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || +	      work->wakeup_all) +		return; + +	r = kvm_mmu_reload(vcpu); +	if (unlikely(r)) +		return; + +	if (!vcpu->arch.mmu.direct_map && +	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) +		return; + +	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); +} + +static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) +{ +	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); +} + +static inline u32 kvm_async_pf_next_probe(u32 key) +{ +	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); +} + +static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ +	u32 key = kvm_async_pf_hash_fn(gfn); + +	while (vcpu->arch.apf.gfns[key] != ~0) +		key = kvm_async_pf_next_probe(key); + +	vcpu->arch.apf.gfns[key] = gfn; +} + +static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) +{ +	int i; +	u32 key = kvm_async_pf_hash_fn(gfn); + +	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && +		     (vcpu->arch.apf.gfns[key] != gfn && +		      vcpu->arch.apf.gfns[key] != ~0); i++) +		key = kvm_async_pf_next_probe(key); + +	return key; +} + +bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ +	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; +} + +static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ +	u32 i, j, k; + +	i = j = kvm_async_pf_gfn_slot(vcpu, gfn); +	while (true) { +		vcpu->arch.apf.gfns[i] = ~0; +		do { +			j = kvm_async_pf_next_probe(j); +			if (vcpu->arch.apf.gfns[j] == ~0) +				return; +			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 
+			/* +			 * k lies cyclically in ]i,j] +			 * |    i.k.j | +			 * |....j i.k.| or  |.k..j i...| +			 */ +		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); +		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; +		i = j; +	} +} + +static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) +{ + +	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, +				      sizeof(val)); +} + +void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, +				     struct kvm_async_pf *work) +{ +	struct x86_exception fault; + +	trace_kvm_async_pf_not_present(work->arch.token, work->gva); +	kvm_add_async_pf_gfn(vcpu, work->arch.gfn); + +	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || +	    (vcpu->arch.apf.send_user_only && +	     kvm_x86_ops->get_cpl(vcpu) == 0)) +		kvm_make_request(KVM_REQ_APF_HALT, vcpu); +	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { +		fault.vector = PF_VECTOR; +		fault.error_code_valid = true; +		fault.error_code = 0; +		fault.nested_page_fault = false; +		fault.address = work->arch.token; +		kvm_inject_page_fault(vcpu, &fault); +	} +} + +void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, +				 struct kvm_async_pf *work) +{ +	struct x86_exception fault; + +	trace_kvm_async_pf_ready(work->arch.token, work->gva); +	if (work->wakeup_all) +		work->arch.token = ~0; /* broadcast wakeup */ +	else +		kvm_del_async_pf_gfn(vcpu, work->arch.gfn); + +	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && +	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { +		fault.vector = PF_VECTOR; +		fault.error_code_valid = true; +		fault.error_code = 0; +		fault.nested_page_fault = false; +		fault.address = work->arch.token; +		kvm_inject_page_fault(vcpu, &fault); +	} +	vcpu->arch.apf.halted = false; +	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; +} + +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) +{ +	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) +		return true; +	else +		return !kvm_event_needs_reinjection(vcpu) && +			kvm_x86_ops->interrupt_allowed(vcpu); +} + +void kvm_arch_register_noncoherent_dma(struct kvm *kvm) +{ +	atomic_inc(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); + +void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) +{ +	atomic_dec(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); + +bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) +{ +	return atomic_read(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); +  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); @@ -6123,3 +7587,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);  | 
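The kvm_vcpu_deliver_sipi_vector() hunk above follows the x86 startup-IPI convention: the 8-bit SIPI vector becomes the real-mode code segment, so the AP begins fetching at physical address vector * 4096 with IP = 0. A standalone arithmetic check of that encoding (plain userspace C, illustrative only, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int vector = 0x9a;		/* example SIPI vector */
		unsigned int cs_selector = vector << 8;	/* 0x9a00, as written to cs.selector */
		unsigned int cs_base = vector << 12;	/* 0x9a000, as written to cs.base */
		unsigned int start = cs_base + 0;	/* rip is set to 0 */

		printf("SIPI %#x -> CS %#x, start at physical %#x\n",
		       vector, cs_selector, start);
		return 0;
	}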
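The long comment in kvm_arch_hardware_enable() above describes how a reliable TSC that was reset across suspend is compensated: find the largest TSC any vCPU last observed, and if the freshly read host TSC is behind it, pad every vCPU's offset by the same gap, accumulating across repeated suspend cycles. A minimal model of that bookkeeping, using hypothetical standalone types rather than the kernel's struct kvm_vcpu:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vcpu_state {
		uint64_t last_host_tsc;		/* TSC seen when the vCPU was last unloaded */
		uint64_t tsc_offset_adjustment;	/* accumulated compensation */
	};

	static void compensate_backwards_tsc(struct vcpu_state *vcpus, int n,
					     uint64_t local_tsc)
	{
		uint64_t max_tsc = 0;
		bool backwards = false;
		int i;

		/* Pass 1: did any vCPU record a TSC larger than what we read now? */
		for (i = 0; i < n; i++) {
			if (vcpus[i].last_host_tsc > local_tsc) {
				backwards = true;
				if (vcpus[i].last_host_tsc > max_tsc)
					max_tsc = vcpus[i].last_host_tsc;
			}
		}
		if (!backwards)
			return;

		/*
		 * Pass 2: pad every vCPU by the same delta so no guest observes
		 * the TSC jumping backwards; accumulate in case several suspend
		 * cycles happen before a vCPU gets to run again.
		 */
		for (i = 0; i < n; i++) {
			vcpus[i].tsc_offset_adjustment += max_tsc - local_tsc;
			vcpus[i].last_host_tsc = local_tsc;
		}
	}

	int main(void)
	{
		struct vcpu_state v[2] = {
			{ .last_host_tsc = 5000000000ull },
			{ .last_host_tsc = 5000100000ull },
		};

		/* Host TSC was reset by suspend/resume and now reads lower. */
		compensate_backwards_tsc(v, 2, 1000000000ull);
		printf("per-vCPU adjustment = %llu cycles\n",
		       (unsigned long long)v[0].tsc_offset_adjustment);
		return 0;
	}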
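In the kvm_arch_create_memslot() hunk above, large-page support is disabled for a slot unless the guest frame number and the host userspace page number agree modulo the huge-page span, since only then can a contiguous, aligned guest 2M page be backed by a contiguous, equally aligned host 2M region. A small standalone version of that congruence test (userspace C, constants chosen for the 4K/2M case, illustrative only):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGES_PER_2M_PAGE	512	/* 2M / 4K */

	static bool slot_allows_2m_pages(uint64_t base_gfn, uint64_t userspace_addr)
	{
		uint64_t ugfn = userspace_addr >> PAGE_SHIFT;

		/* Same test as the diff: the low bits of gfn and ugfn must match. */
		return ((base_gfn ^ ugfn) & (PAGES_PER_2M_PAGE - 1)) == 0;
	}

	int main(void)
	{
		/* gfn 0x100000 with a 2M-aligned host mapping: large pages possible. */
		printf("%d\n", slot_allows_2m_pages(0x100000, 0x7f0000000000ull));
		/* Host mapping off by one 4K page: large pages must be disabled. */
		printf("%d\n", slot_allows_2m_pages(0x100000, 0x7f0000001000ull));
		return 0;
	}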
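The async page-fault bookkeeping added above (kvm_add_async_pf_gfn() / kvm_del_async_pf_gfn()) is an open-addressed hash table with linear probing, ~0 as the empty marker, and back-shift deletion; the "k lies cyclically in ]i,j]" test decides whether the entry at slot j can stay put or must be moved back into the hole at i so its probe chain is not cut. A self-contained model of the same probing and deletion logic (userspace C; the table size and hash function are stand-ins, not the kernel's hash_32()/ASYNC_PF_PER_VCPU):

	#include <stdint.h>
	#include <stdio.h>

	#define NSLOTS	8			/* power of two, like roundup_pow_of_two(ASYNC_PF_PER_VCPU) */
	#define EMPTY	(~(uint64_t)0)

	static uint64_t gfns[NSLOTS];

	static uint32_t hash_gfn(uint64_t gfn)
	{
		/* multiplicative stand-in for hash_32() */
		return (uint32_t)(gfn * 2654435761u) & (NSLOTS - 1);
	}

	static uint32_t next_probe(uint32_t key)
	{
		return (key + 1) & (NSLOTS - 1);
	}

	static void add_gfn(uint64_t gfn)
	{
		uint32_t key = hash_gfn(gfn);

		while (gfns[key] != EMPTY)
			key = next_probe(key);
		gfns[key] = gfn;
	}

	static uint32_t slot_of(uint64_t gfn)
	{
		uint32_t key = hash_gfn(gfn);
		int i;

		for (i = 0; i < NSLOTS && gfns[key] != gfn && gfns[key] != EMPTY; i++)
			key = next_probe(key);
		return key;
	}

	static void del_gfn(uint64_t gfn)
	{
		uint32_t i, j, k;

		i = j = slot_of(gfn);
		for (;;) {
			gfns[i] = EMPTY;
			do {
				j = next_probe(j);
				if (gfns[j] == EMPTY)
					return;
				k = hash_gfn(gfns[j]);
				/*
				 * Entry j may stay put as long as its home slot k
				 * lies cyclically in ]i, j]; otherwise the hole at i
				 * would cut its probe chain, so fall through and
				 * move it back into the hole.
				 */
			} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
			gfns[i] = gfns[j];
			i = j;
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < NSLOTS; i++)
			gfns[i] = EMPTY;

		add_gfn(0x1000); add_gfn(0x2000); add_gfn(0x3000);
		del_gfn(0x2000);
		for (i = 0; i < NSLOTS; i++)
			if (gfns[i] != EMPTY)
				printf("slot %d: gfn %#llx\n", i,
				       (unsigned long long)gfns[i]);
		return 0;
	}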