path: root/arch/x86/xen/enlighten.c
author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-07-19 10:23:47 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-21 14:44:50 -0400
commit		59b294403e9814e7c1154043567f0d71bac7a511 (patch)
tree		42f84556415209884f19ab58df234ae098f67c66 /arch/x86/xen/enlighten.c
parent		a3118beb6a8cbe77ae3342125d920205871b0717 (diff)
xen/x86: Use memblock_reserve for sensitive areas.
instead of a big memblock_reserve. This way we can be more selective in
freeing regions (and it also makes it easier to understand where is what).

[v1: Move the auto_translate_physmap to proper line]
[v2: Per Stefano suggestion add more comments]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--	arch/x86/xen/enlighten.c	48
1 files changed, 48 insertions, 0 deletions
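
Before the diff itself, a minimal standalone sketch (not part of the patch) of the per-frame reservation pattern it introduces: translate the machine frame number (MFN) provided by the hypervisor into a pseudo-physical frame, check that the M2P actually knows about it, and reserve exactly one page with memblock. mfn_to_pfn(), phys_to_machine_mapping_valid(), PFN_PHYS() and memblock_reserve() are existing kernel/Xen interfaces; the wrapper name reserve_one_xen_frame() is made up for illustration and simply mirrors the xen_reserve_mfn() helper added below.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <asm/xen/page.h>	/* mfn_to_pfn(), phys_to_machine_mapping_valid() */

/* Illustrative sketch only -- mirrors the xen_reserve_mfn() helper in the diff. */
static void __init reserve_one_xen_frame(unsigned long mfn)
{
	unsigned long pfn;

	if (!mfn)
		return;

	pfn = mfn_to_pfn(mfn);			/* machine frame -> pseudo-physical frame */
	if (!phys_to_machine_mapping_valid(pfn))
		return;				/* the hypervisor never told us about this MFN */

	/* Reserve just this one page rather than one large blanket region. */
	memblock_reserve(PFN_PHYS(pfn), PAGE_SIZE);
}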
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ff962d4b821..e532eb50e8d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -998,7 +998,54 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
	return ret;
}
+/*
+ * If the MFN is not in the m2p (provided to us by the hypervisor) this
+ * function won't do anything. In practice this means that the XenBus
+ * MFN won't be available for the initial domain. */
+static void __init xen_reserve_mfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (!mfn)
+		return;
+	pfn = mfn_to_pfn(mfn);
+	if (phys_to_machine_mapping_valid(pfn))
+		memblock_reserve(PFN_PHYS(pfn), PAGE_SIZE);
+}
+static void __init xen_reserve_internals(void)
+{
+	unsigned long size;
+
+	if (!xen_pv_domain())
+		return;
+
+	/* xen_start_info does not exist in the M2P, hence can't use
+	 * xen_reserve_mfn. */
+	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
+
+	xen_reserve_mfn(PFN_DOWN(xen_start_info->shared_info));
+	xen_reserve_mfn(xen_start_info->store_mfn);
+	if (!xen_initial_domain())
+		xen_reserve_mfn(xen_start_info->console.domU.mfn);
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
+	/*
+	 * ALIGN up to compensate for the p2m_page pointing to an array that
+	 * can be partially filled (look in xen_build_dynamic_phys_to_machine).
+	 */
+
+	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+	/* We could use xen_reserve_mfn here, but we would end up looping quite
+	 * a lot (and calling memblock_reserve for each PAGE), so let's just use
+	 * the easy way and reserve it wholesale. */
+	memblock_reserve(__pa(xen_start_info->mfn_list), size);
+
+	/* The pagetables are reserved in mmu.c */
+}
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1362,6 +1409,7 @@ asmlinkage void __init xen_start_kernel(void)
	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+	xen_reserve_internals();
	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();
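
As a footnote to the PAGE_ALIGN() used in xen_reserve_internals() above: the p2m list holds one unsigned long per guest page and xen_build_dynamic_phys_to_machine() hands it out page by page, so its last page may be only partially filled. A hedged worked example of the sizing (the helper name and the numbers are illustrative, not from the patch):

#include <linux/kernel.h>
#include <asm/page.h>	/* PAGE_ALIGN(), PAGE_SIZE */

/*
 * Hypothetical sizing helper: on 64-bit, a domain with nr_pages = 1000 needs
 * 1000 * sizeof(unsigned long) = 8000 bytes of mfn_list.  PAGE_ALIGN(8000)
 * rounds that up to 8192, so the partially filled second page stays inside
 * the reservation instead of being left out.
 */
static inline unsigned long mfn_list_bytes_to_reserve(unsigned long nr_pages)
{
	return PAGE_ALIGN(nr_pages * sizeof(unsigned long));
}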