author		Marcelo Tosatti <mtosatti@redhat.com>	2009-06-11 12:07:41 -0300
committer	Avi Kivity <avi@redhat.com>		2009-09-10 08:32:55 +0300
commit		4d88954d6246d7d43bb8903981731002179f1a1c (patch)
tree		6629e1a9cd6968d9048e078775aeedff43ac133e
parent		e799794e02a368f79c3fae26aabaaadd0b7466ce (diff)
KVM: MMU: make for_each_shadow_entry aware of largepages
This way there is no need to add explicit checks in every
for_each_shadow_entry user.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
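
[Editorial context, not part of the patch] for_each_shadow_entry is the iterator macro in arch/x86/kvm/mmu.c that uses shadow_walk_okay() as its loop condition, roughly as it reads in this era of the tree:

	#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
		for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
		     shadow_walk_okay(&(_walker));			\
		     shadow_walk_next(&(_walker)))

Because shadow_walk_okay() is the termination test of that loop, ending the walk there when the current SPTE maps a large page covers every user of the macro at once.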
-rw-r--r--	arch/x86/kvm/mmu.c	| 5 +++++
1 files changed, 5 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 780ce3fe791..e18f65bf2de 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1302,6 +1302,11 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 {
 	if (iterator->level < PT_PAGE_TABLE_LEVEL)
 		return false;
+
+	if (iterator->level == PT_PAGE_TABLE_LEVEL)
+		if (is_large_pte(*iterator->sptep))
+			return false;
+
 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
 	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
 	return true;
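
[Editorial sketch, not part of the patch] A minimal illustration of the effect on callers, using a hypothetical helper find_leaf_spte() (the name and function are assumptions for the example): with the new check in shadow_walk_okay(), the loop ends on its own when it reaches a large-page SPTE, so the loop body needs no explicit is_large_pte() test of its own.

	/*
	 * Hypothetical example, not from the patch: walk the shadow page
	 * tables for @addr and return the last SPTE visited.  With the
	 * change above, shadow_walk_okay() stops the walk at a large-page
	 * SPTE, so the body does not repeat the is_large_pte() check.
	 */
	static u64 *find_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;
		u64 *sptep = NULL;

		for_each_shadow_entry(vcpu, addr, iterator)
			sptep = iterator.sptep;

		return sptep;
	}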