-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 23
-rw-r--r--  include/linux/kvm_host.h            | 25
-rw-r--r--  virt/kvm/kvm_main.c                 | 21
3 files changed, 28 insertions, 41 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5f3c60b89fa..def880aea63 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -21,25 +21,6 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
-/*
- * Since this file is built in even if KVM is a module, we need
- * a local copy of this function for the case where kvm_main.c is
- * modular.
- */
-static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
-                                                      gfn_t gfn)
-{
-        struct kvm_memslots *slots;
-        struct kvm_memory_slot *memslot;
-
-        slots = kvm_memslots(kvm);
-        kvm_for_each_memslot(memslot, slots)
-                if (gfn >= memslot->base_gfn &&
-                    gfn < memslot->base_gfn + memslot->npages)
-                        return memslot;
-        return NULL;
-}
-
 /* Translate address of a vmalloc'd thing to a linear map address */
 static void *real_vmalloc_addr(void *x)
 {
@@ -99,7 +80,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
         rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
         ptel = rev->guest_rpte |= rcbits;
         gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
-        memslot = builtin_gfn_to_memslot(kvm, gfn);
+        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                 return;
 
@@ -181,7 +162,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
         /* Find the memslot (if any) for this address */
         gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
         gfn = gpa >> PAGE_SHIFT;
-        memslot = builtin_gfn_to_memslot(kvm, gfn);
+        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
         pa = 0;
         is_io = ~0ul;
         rmap = NULL;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eada8e69fe5..9698080c902 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -651,6 +651,31 @@ static inline void kvm_guest_exit(void)
         current->flags &= ~PF_VCPU;
 }
 
+/*
+ * search_memslots() and __gfn_to_memslot() are here because they are
+ * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
+ * gfn_to_memslot() itself isn't here as an inline because that would
+ * bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+        struct kvm_memory_slot *memslot;
+
+        kvm_for_each_memslot(memslot, slots)
+                if (gfn >= memslot->base_gfn &&
+                    gfn < memslot->base_gfn + memslot->npages)
+                        return memslot;
+
+        return NULL;
+}
+
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+        return search_memslots(slots, gfn);
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
         return gfn_to_memslot(kvm, gfn)->id;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9f32bffd37c..470e30520fe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -640,19 +640,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 }
 #endif /* !CONFIG_S390 */
 
-static struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
-{
-        struct kvm_memory_slot *memslot;
-
-        kvm_for_each_memslot(memslot, slots)
-                if (gfn >= memslot->base_gfn &&
-                    gfn < memslot->base_gfn + memslot->npages)
-                        return memslot;
-
-        return NULL;
-}
-
 static int cmp_memslot(const void *slot1, const void *slot2)
 {
         struct kvm_memory_slot *s1, *s2;
@@ -1031,12 +1018,6 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
-                                                gfn_t gfn)
-{
-        return search_memslots(slots, gfn);
-}
-
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
         return __gfn_to_memslot(kvm_memslots(kvm), gfn);
@@ -1459,7 +1440,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
         ghc->gpa = gpa;
         ghc->generation = slots->generation;
-        ghc->memslot = __gfn_to_memslot(slots, gfn);
+        ghc->memslot = gfn_to_memslot(kvm, gfn);
         ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
         if (!kvm_is_error_hva(ghc->hva))
                 ghc->hva += offset;
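The hunks above replace the PPC-private builtin_gfn_to_memslot() with the __gfn_to_memslot() inline that the patch moves into include/linux/kvm_host.h. As a rough illustration of the resulting call pattern, here is a minimal sketch (not part of the patch; the helper name sample_gfn_is_mapped() is hypothetical) showing how a built-in caller can now look up a memslot and apply the same validity check that the patched book3s_hv_rm_mmu.c uses:

/*
 * Illustrative sketch only, assuming the inline helpers added to
 * include/linux/kvm_host.h by this patch; sample_gfn_is_mapped()
 * is a hypothetical name, not something the patch introduces.
 */
#include <linux/kvm_host.h>

static bool sample_gfn_is_mapped(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        /* Linear scan over the memslot array, now inlined at the call site */
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);

        /* Same validity test the patched remove_revmap_chain() performs */
        return memslot && !(memslot->flags & KVM_MEMSLOT_INVALID);
}

Because the lookup is an inline in kvm_host.h rather than a static function in kvm_main.c, it remains usable from code that is built into the kernel even when KVM itself is built as a module, which is exactly the situation of the real-mode PPC MMU code.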