author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2014-05-06 21:24:18 +0530
committer Alexander Graf <agraf@suse.de>  2014-05-30 14:26:24 +0200
commit    792fc49787cb7df13f1c38d3e25c863e1c3a6bb2 (patch)
tree      5f1708c709983a84b7c76d26a8a8b96f522c45d1 /arch/powerpc/kvm
parent    9916d57e64a49d85bcffe272478f869b8fe1583a (diff)
KVM: PPC: BOOK3S: HV: Prefer CMA region for hash page table allocation
Today, when KVM tries to reserve memory for the hash page table, it allocates from the normal page allocator first and falls back to CMA's reserved region only if that fails. One side effect is that we can exhaust the page allocator and drive Linux into OOM conditions while plenty of space is still available in CMA.

This patch addresses the issue by first trying the hash page table allocation from CMA's reserved region before falling back to the normal page allocator. So if we run out of memory, we really are out of memory.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
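For illustration, here is a minimal userspace sketch of the allocation ordering this patch introduces. alloc_from_cma() and alloc_from_buddy() are hypothetical stand-ins for kvm_alloc_hpt() and __get_free_pages(); only the ordering (CMA reserve first, then successively smaller requests to the normal page allocator) is taken from the diff below.

/*
 * Sketch of the post-patch HPT allocation order.  The allocator helpers
 * here are hypothetical stand-ins, not the kernel functions themselves.
 */
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for kvm_alloc_hpt(): allocate from the CMA reserve. */
static void *alloc_from_cma(unsigned int order)
{
	return malloc(1UL << order);
}

/* Hypothetical stand-in for __get_free_pages(): the normal page allocator. */
static void *alloc_from_buddy(unsigned int order)
{
	return malloc(1UL << order);
}

int main(void)
{
	unsigned int order = 18;	/* example HPT order */
	void *hpt;
	int from_cma = 0;

	/* New behaviour: try the CMA reserved region first ... */
	hpt = alloc_from_cma(order);
	if (hpt) {
		from_cma = 1;
	} else {
		/*
		 * ... and only then fall back to the normal page allocator,
		 * retrying with successively smaller orders.
		 */
		while (!hpt && order > 12) {
			hpt = alloc_from_buddy(order);
			if (!hpt)
				order--;
		}
	}

	if (!hpt)
		return 1;
	printf("HPT of order %u allocated from %s\n",
	       order, from_cma ? "CMA" : "page allocator");
	free(hpt);
	return 0;
}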
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index fb25ebc0af0..f32896ffd78 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -52,7 +52,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm);
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
- unsigned long hpt;
+ unsigned long hpt = 0;
struct revmap_entry *rev;
struct page *page = NULL;
long order = KVM_DEFAULT_HPT_ORDER;
@@ -64,22 +64,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
kvm->arch.hpt_cma_alloc = 0;
- /*
- * try first to allocate it from the kernel page allocator.
- * We keep the CMA reserved for failed allocation.
- */
- hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
- __GFP_NOWARN, order - PAGE_SHIFT);
-
- /* Next try to allocate from the preallocated pool */
- if (!hpt) {
- VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
- page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
- if (page) {
- hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
- kvm->arch.hpt_cma_alloc = 1;
- } else
- --order;
+ VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
+ page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ if (page) {
+ hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+ kvm->arch.hpt_cma_alloc = 1;
}
/* Lastly try successively smaller sizes from the page allocator */