author	Marcelo Tosatti <mtosatti@redhat.com>	2009-09-18 17:29:26 -0300
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-10-05 09:32:25 -0700
commit	7b05aacb73a87411552cf83ab0a0bf519d47ef2f (patch)
tree	25db0754ba5703ef3d1164b386991a3fe4119dc4 /arch
parent	1cb6728f666f55ecfcf79a9713c6f2b23d1b2925 (diff)
KVM: MMU: fix missing locking in alloc_mmu_pages
(cherry picked from commit 6a1ac77110ee3e8d8dfdef8442f3b30b3d83e6a2)

n_requested_mmu_pages/n_free_mmu_pages are used by kvm_mmu_change_mmu_pages
to calculate the number of pages to zap. alloc_mmu_pages, called from the
vcpu initialization path, modifies these variables without proper locking,
which can result in a negative value in kvm_mmu_change_mmu_pages (say, with
cpu hotplug).

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
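For context, the sketch below is a minimal user-space model of the locking
pattern this patch establishes: one path derives the number of in-use shadow
pages from the n_alloc_mmu_pages/n_free_mmu_pages pair under mmu_lock, while
the vcpu-init path (after the patch) takes the same lock before rewriting
n_free_mmu_pages. The struct, the helper names (fake_kvm, pages_to_zap,
vcpu_init_path) and the pthread scaffolding are illustrative assumptions, not
the actual KVM code.

/*
 * Simplified user-space model of the locking pattern; NOT the real kernel
 * code. Build with: cc -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_kvm {
	pthread_mutex_t mmu_lock;
	int n_alloc_mmu_pages;      /* shadow pages currently allocated     */
	int n_free_mmu_pages;       /* shadow pages still unused            */
	int n_requested_mmu_pages;  /* limit requested by userspace, if any */
};

/*
 * Models the reader side (kvm_mmu_change_mmu_pages in the commit message):
 * it derives "pages in use" from the two counters while holding mmu_lock.
 * If a writer updated n_free_mmu_pages without taking the same lock, the
 * value computed here could be stale or negative.
 */
static int pages_to_zap(struct fake_kvm *kvm, int new_limit)
{
	int used;

	pthread_mutex_lock(&kvm->mmu_lock);
	used = kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages;
	pthread_mutex_unlock(&kvm->mmu_lock);

	return used - new_limit;
}

/*
 * Models the vcpu initialization path (e.g. triggered by cpu hotplug).
 * Before the patch the counter update below ran unlocked; with the patch
 * it is serialized against readers by mmu_lock, as shown here.
 */
static void *vcpu_init_path(void *arg)
{
	struct fake_kvm *kvm = arg;

	pthread_mutex_lock(&kvm->mmu_lock);
	kvm->n_free_mmu_pages = kvm->n_requested_mmu_pages
		? kvm->n_requested_mmu_pages
		: kvm->n_alloc_mmu_pages;
	pthread_mutex_unlock(&kvm->mmu_lock);
	return NULL;
}

int main(void)
{
	struct fake_kvm kvm = {
		.mmu_lock = PTHREAD_MUTEX_INITIALIZER,
		.n_alloc_mmu_pages = 256,
		.n_free_mmu_pages = 64,
		.n_requested_mmu_pages = 512,
	};
	pthread_t t;

	pthread_create(&t, NULL, vcpu_init_path, &kvm);
	printf("pages to zap: %d\n", pages_to_zap(&kvm, 128));
	pthread_join(&t, NULL);
	return 0;
}

The point of the sketch is that both sides must use the same lock: protecting
only the reader, as before the patch, still lets the counter update interleave
with the reader's arithmetic.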
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	2
1 file changed, 2 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a5cdb35af51..e74574e32f1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2713,12 +2713,14 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 	ASSERT(vcpu);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	if (vcpu->kvm->arch.n_requested_mmu_pages)
 		vcpu->kvm->arch.n_free_mmu_pages =
 					vcpu->kvm->arch.n_requested_mmu_pages;
 	else
 		vcpu->kvm->arch.n_free_mmu_pages =
 					vcpu->kvm->arch.n_alloc_mmu_pages;
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first