author     Ingo Molnar <mingo@elte.hu>  2010-10-25 19:38:43 +0200
committer  Ingo Molnar <mingo@elte.hu>  2010-10-25 19:38:52 +0200
commit     7d7a48b76012eeee25871e1d69104dec4b6d740f (patch)
tree       c27e3d8bb2e80ad93c7b4a5c850e1ffbc090bcd2 /arch/x86/xen
parent     e4072a9a9d186fe86293effe8828faa4be75b4a4 (diff)
parent     229aebb873e29726b91e076161649cf45154b0bf (diff)
Merge branch 'linus' into x86/urgent
Merge reason: We want to queue up a dependent fix.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/debugfs.c           1
-rw-r--r--  arch/x86/xen/enlighten.c         3
-rw-r--r--  arch/x86/xen/mmu.c              32
-rw-r--r--  arch/x86/xen/pci-swiotlb-xen.c   5
-rw-r--r--  arch/x86/xen/setup.c             3
-rw-r--r--  arch/x86/xen/spinlock.c          2
6 files changed, 33 insertions, 13 deletions
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index 1304bcec8ee..7c0fedd98ea 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -106,6 +106,7 @@ static const struct file_operations u32_array_fops = {
.open = u32_array_open,
.release= xen_array_release,
.read = u32_array_read,
+ .llseek = no_llseek,
};
struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44f80861382..3078dd5a806 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -30,6 +30,7 @@
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/memblock.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -1183,6 +1184,8 @@ asmlinkage void __init xen_start_kernel(void)
local_irq_disable();
early_boot_irqs_off();
+ memblock_init();
+
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 42086ac406a..f72d18c6922 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -45,6 +45,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
+#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -55,6 +56,7 @@
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
+#include <asm/init.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -359,7 +361,8 @@ void make_lowmem_page_readonly(void *vaddr)
unsigned int level;
pte = lookup_address(address, &level);
- BUG_ON(pte == NULL);
+ if (pte == NULL)
+ return; /* vaddr missing */
ptev = pte_wrprotect(*pte);
@@ -374,7 +377,8 @@ void make_lowmem_page_readwrite(void *vaddr)
unsigned int level;
pte = lookup_address(address, &level);
- BUG_ON(pte == NULL);
+ if (pte == NULL)
+ return; /* vaddr missing */
ptev = pte_mkwrite(*pte);
@@ -1508,13 +1512,25 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
+ unsigned long pfn = pte_pfn(pte);
+
+#ifdef CONFIG_X86_32
/* If there's an existing pte, then don't allow _PAGE_RW to be set */
if (pte_val_ma(*ptep) & _PAGE_PRESENT)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
+#endif
+
+ /*
+ * If the new pfn is within the range of the newly allocated
+ * kernel pagetable, and it isn't being mapped into an
+ * early_ioremap fixmap slot, make sure it is RO.
+ */
+ if (!is_early_ioremap_ptep(ptep) &&
+ pfn >= e820_table_start && pfn < e820_table_end)
+ pte = pte_wrprotect(pte);
return pte;
}
@@ -1527,7 +1543,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
xen_set_pte(ptep, pte);
}
-#endif
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
@@ -1814,7 +1829,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
__xen_write_cr3(true, __pa(pgd));
xen_mc_issue(PARAVIRT_LAZY_CPU);
- reserve_early(__pa(xen_start_info->pt_base),
+ memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
@@ -1852,7 +1867,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
- reserve_early(__pa(xen_start_info->pt_base),
+ memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
@@ -1969,14 +1984,9 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pte = xen_alloc_pte_init,
.release_pte = xen_release_pte_init,
.alloc_pmd = xen_alloc_pmd_init,
- .alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
-#ifdef CONFIG_X86_64
- .set_pte = xen_set_pte,
-#else
.set_pte = xen_set_pte_init,
-#endif
.set_pte_at = xen_set_pte_at,
.set_pmd = xen_set_pmd_hyper,
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index a013ec9d0c5..22471001b74 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -5,6 +5,7 @@
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
+#include <asm/iommu_table.h>
int xen_swiotlb __read_mostly;
@@ -56,3 +57,7 @@ void __init pci_xen_swiotlb_init(void)
dma_ops = &xen_swiotlb_dma_ops;
}
}
+IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
+ 0,
+ pci_xen_swiotlb_init,
+ 0);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 328b0030542..9729c903404 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
+#include <linux/memblock.h>
#include <asm/elf.h>
#include <asm/vdso.h>
@@ -129,7 +130,7 @@ char * __init xen_memory_setup(void)
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
- reserve_early(__pa(xen_start_info->mfn_list),
+ memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
__pa(xen_start_info->pt_base),
"XEN START INFO");
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index e0500646585..23e061b9327 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -224,7 +224,7 @@ static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enab
goto out;
}
- flags = __raw_local_save_flags();
+ flags = arch_local_save_flags();
if (irq_enable) {
ADD_STATS(taken_slow_irqenable, 1);
raw_local_irq_enable();