Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 1565 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 773 insertions(+), 792 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 21ed8d7f75a..e8a1201c329 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -46,6 +46,10 @@  #include <linux/module.h>  #include <linux/gfp.h>  #include <linux/memblock.h> +#include <linux/seq_file.h> +#include <linux/crash_dump.h> + +#include <trace/events/xen.h>  #include <asm/pgtable.h>  #include <asm/tlbflush.h> @@ -58,6 +62,7 @@  #include <asm/page.h>  #include <asm/init.h>  #include <asm/pat.h> +#include <asm/smp.h>  #include <asm/xen/hypercall.h>  #include <asm/xen/hypervisor.h> @@ -74,68 +79,13 @@  #include "mmu.h"  #include "debugfs.h" -#define MMU_UPDATE_HISTO	30 -  /*   * Protects atomic reservation decrease/increase against concurrent increases. - * Also protects non-atomic updates of current_pages and driver_pages, and - * balloon lists. + * Also protects non-atomic updates of current_pages and balloon lists.   */  DEFINE_SPINLOCK(xen_reservation_lock); -#ifdef CONFIG_XEN_DEBUG_FS - -static struct { -	u32 pgd_update; -	u32 pgd_update_pinned; -	u32 pgd_update_batched; - -	u32 pud_update; -	u32 pud_update_pinned; -	u32 pud_update_batched; - -	u32 pmd_update; -	u32 pmd_update_pinned; -	u32 pmd_update_batched; - -	u32 pte_update; -	u32 pte_update_pinned; -	u32 pte_update_batched; - -	u32 mmu_update; -	u32 mmu_update_extended; -	u32 mmu_update_histo[MMU_UPDATE_HISTO]; - -	u32 prot_commit; -	u32 prot_commit_batched; - -	u32 set_pte_at; -	u32 set_pte_at_batched; -	u32 set_pte_at_pinned; -	u32 set_pte_at_current; -	u32 set_pte_at_kernel; -} mmu_stats; - -static u8 zero_stats; - -static inline void check_zero(void) -{ -	if (unlikely(zero_stats)) { -		memset(&mmu_stats, 0, sizeof(mmu_stats)); -		zero_stats = 0; -	} -} - -#define ADD_STATS(elem, val)			\ -	do { check_zero(); mmu_stats.elem += (val); } while(0) - -#else  /* !CONFIG_XEN_DEBUG_FS */ - -#define ADD_STATS(elem, val)	do { (void)(val); } while(0) - -#endif /* CONFIG_XEN_DEBUG_FS */ - - +#ifdef CONFIG_X86_32  /*   * Identity map, in addition to plain kernel map.  This needs to be   * large enough to allocate page table pages to allocate the rest. @@ -143,7 +93,7 @@ static inline void check_zero(void)   */  #define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)  static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); - +#endif  #ifdef CONFIG_X86_64  /* l3 pud for userspace vsyscall mapping */  static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; @@ -173,371 +123,6 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */   */  #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) -/* - * Xen leaves the responsibility for maintaining p2m mappings to the - * guests themselves, but it must also access and update the p2m array - * during suspend/resume when all the pages are reallocated. - * - * The p2m table is logically a flat array, but we implement it as a - * three-level tree to allow the address space to be sparse. - * - *                               Xen - *                                | - *     p2m_top              p2m_top_mfn - *       /  \                   /   \ - * p2m_mid p2m_mid	p2m_mid_mfn p2m_mid_mfn - *    / \      / \         /           / - *  p2m p2m p2m p2m p2m p2m p2m ... - * - * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. 
- * - * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the - * maximum representable pseudo-physical address space is: - *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages - * - * P2M_PER_PAGE depends on the architecture, as a mfn is always - * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to - * 512 and 1024 entries respectively.  - */ - -unsigned long xen_max_p2m_pfn __read_mostly; - -#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long)) -#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *)) -#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **)) - -#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) - -/* Placeholders for holes in the address space */ -static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); - -static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); - -RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); -RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); - -static inline unsigned p2m_top_index(unsigned long pfn) -{ -	BUG_ON(pfn >= MAX_P2M_PFN); -	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); -} - -static inline unsigned p2m_mid_index(unsigned long pfn) -{ -	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; -} - -static inline unsigned p2m_index(unsigned long pfn) -{ -	return pfn % P2M_PER_PAGE; -} - -static void p2m_top_init(unsigned long ***top) -{ -	unsigned i; - -	for (i = 0; i < P2M_TOP_PER_PAGE; i++) -		top[i] = p2m_mid_missing; -} - -static void p2m_top_mfn_init(unsigned long *top) -{ -	unsigned i; - -	for (i = 0; i < P2M_TOP_PER_PAGE; i++) -		top[i] = virt_to_mfn(p2m_mid_missing_mfn); -} - -static void p2m_top_mfn_p_init(unsigned long **top) -{ -	unsigned i; - -	for (i = 0; i < P2M_TOP_PER_PAGE; i++) -		top[i] = p2m_mid_missing_mfn; -} - -static void p2m_mid_init(unsigned long **mid) -{ -	unsigned i; - -	for (i = 0; i < P2M_MID_PER_PAGE; i++) -		mid[i] = p2m_missing; -} - -static void p2m_mid_mfn_init(unsigned long *mid) -{ -	unsigned i; - -	for (i = 0; i < P2M_MID_PER_PAGE; i++) -		mid[i] = virt_to_mfn(p2m_missing); -} - -static void p2m_init(unsigned long *p2m) -{ -	unsigned i; - -	for (i = 0; i < P2M_MID_PER_PAGE; i++) -		p2m[i] = INVALID_P2M_ENTRY; -} - -/* - * Build the parallel p2m_top_mfn and p2m_mid_mfn structures - * - * This is called both at boot time, and after resuming from suspend: - * - At boot time we're called very early, and must use extend_brk() - *   to allocate memory. - * - * - After resume we're called from within stop_machine, but the mfn - *   tree should alreay be completely allocated. 
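The removed p2m code above stores the flat pfn-to-mfn array as a three-level tree: on 64-bit a page of unsigned longs holds 512 entries, so one top page and one layer of mid pages cover 512*512*512 frames. A stand-alone sketch of how a pfn splits into the three indices, with constants mirroring the definitions above (the program itself is only illustrative):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))	/* 512 on 64-bit */
#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))	/* 512 on 64-bit */
#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))	/* 512 on 64-bit */
#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/* Same decomposition as p2m_top_index()/p2m_mid_index()/p2m_index() above. */
static unsigned p2m_top_index(unsigned long pfn)
{
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}

int main(void)
{
	unsigned long pfn = 0x12345;

	printf("pfn %#lx -> top %u, mid %u, idx %u (max pfn %#lx)\n", pfn,
	       p2m_top_index(pfn), p2m_mid_index(pfn), p2m_index(pfn),
	       (unsigned long)MAX_P2M_PFN);
	return 0;
}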
- */ -void xen_build_mfn_list_list(void) -{ -	unsigned long pfn; - -	/* Pre-initialize p2m_top_mfn to be completely missing */ -	if (p2m_top_mfn == NULL) { -		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); -		p2m_mid_mfn_init(p2m_mid_missing_mfn); - -		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); -		p2m_top_mfn_p_init(p2m_top_mfn_p); - -		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); -		p2m_top_mfn_init(p2m_top_mfn); -	} else { -		/* Reinitialise, mfn's all change after migration */ -		p2m_mid_mfn_init(p2m_mid_missing_mfn); -	} - -	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { -		unsigned topidx = p2m_top_index(pfn); -		unsigned mididx = p2m_mid_index(pfn); -		unsigned long **mid; -		unsigned long *mid_mfn_p; - -		mid = p2m_top[topidx]; -		mid_mfn_p = p2m_top_mfn_p[topidx]; - -		/* Don't bother allocating any mfn mid levels if -		 * they're just missing, just update the stored mfn, -		 * since all could have changed over a migrate. -		 */ -		if (mid == p2m_mid_missing) { -			BUG_ON(mididx); -			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); -			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); -			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; -			continue; -		} - -		if (mid_mfn_p == p2m_mid_missing_mfn) { -			/* -			 * XXX boot-time only!  We should never find -			 * missing parts of the mfn tree after -			 * runtime.  extend_brk() will BUG if we call -			 * it too late. -			 */ -			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); -			p2m_mid_mfn_init(mid_mfn_p); - -			p2m_top_mfn_p[topidx] = mid_mfn_p; -		} - -		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); -		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); -	} -} - -void xen_setup_mfn_list_list(void) -{ -	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); - -	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = -		virt_to_mfn(p2m_top_mfn); -	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; -} - -/* Set up p2m_top to point to the domain-builder provided p2m pages */ -void __init xen_build_dynamic_phys_to_machine(void) -{ -	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; -	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); -	unsigned long pfn; - -	xen_max_p2m_pfn = max_pfn; - -	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); -	p2m_init(p2m_missing); - -	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); -	p2m_mid_init(p2m_mid_missing); - -	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); -	p2m_top_init(p2m_top); - -	/* -	 * The domain builder gives us a pre-constructed p2m array in -	 * mfn_list for all the pages initially given to us, so we just -	 * need to graft that into our tree structure. 
-	 */ -	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { -		unsigned topidx = p2m_top_index(pfn); -		unsigned mididx = p2m_mid_index(pfn); - -		if (p2m_top[topidx] == p2m_mid_missing) { -			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); -			p2m_mid_init(mid); - -			p2m_top[topidx] = mid; -		} - -		p2m_top[topidx][mididx] = &mfn_list[pfn]; -	} -} - -unsigned long get_phys_to_machine(unsigned long pfn) -{ -	unsigned topidx, mididx, idx; - -	if (unlikely(pfn >= MAX_P2M_PFN)) -		return INVALID_P2M_ENTRY; - -	topidx = p2m_top_index(pfn); -	mididx = p2m_mid_index(pfn); -	idx = p2m_index(pfn); - -	return p2m_top[topidx][mididx][idx]; -} -EXPORT_SYMBOL_GPL(get_phys_to_machine); - -static void *alloc_p2m_page(void) -{ -	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); -} - -static void free_p2m_page(void *p) -{ -	free_page((unsigned long)p); -} - -/*  - * Fully allocate the p2m structure for a given pfn.  We need to check - * that both the top and mid levels are allocated, and make sure the - * parallel mfn tree is kept in sync.  We may race with other cpus, so - * the new pages are installed with cmpxchg; if we lose the race then - * simply free the page we allocated and use the one that's there. - */ -static bool alloc_p2m(unsigned long pfn) -{ -	unsigned topidx, mididx; -	unsigned long ***top_p, **mid; -	unsigned long *top_mfn_p, *mid_mfn; - -	topidx = p2m_top_index(pfn); -	mididx = p2m_mid_index(pfn); - -	top_p = &p2m_top[topidx]; -	mid = *top_p; - -	if (mid == p2m_mid_missing) { -		/* Mid level is missing, allocate a new one */ -		mid = alloc_p2m_page(); -		if (!mid) -			return false; - -		p2m_mid_init(mid); - -		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) -			free_p2m_page(mid); -	} - -	top_mfn_p = &p2m_top_mfn[topidx]; -	mid_mfn = p2m_top_mfn_p[topidx]; - -	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); - -	if (mid_mfn == p2m_mid_missing_mfn) { -		/* Separately check the mid mfn level */ -		unsigned long missing_mfn; -		unsigned long mid_mfn_mfn; - -		mid_mfn = alloc_p2m_page(); -		if (!mid_mfn) -			return false; - -		p2m_mid_mfn_init(mid_mfn); - -		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); -		mid_mfn_mfn = virt_to_mfn(mid_mfn); -		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) -			free_p2m_page(mid_mfn); -		else -			p2m_top_mfn_p[topidx] = mid_mfn; -	} - -	if (p2m_top[topidx][mididx] == p2m_missing) { -		/* p2m leaf page is missing */ -		unsigned long *p2m; - -		p2m = alloc_p2m_page(); -		if (!p2m) -			return false; - -		p2m_init(p2m); - -		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) -			free_p2m_page(p2m); -		else -			mid_mfn[mididx] = virt_to_mfn(p2m); -	} - -	return true; -} - -/* Try to install p2m mapping; fail if intermediate bits missing */ -bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ -	unsigned topidx, mididx, idx; - -	if (unlikely(pfn >= MAX_P2M_PFN)) { -		BUG_ON(mfn != INVALID_P2M_ENTRY); -		return true; -	} - -	topidx = p2m_top_index(pfn); -	mididx = p2m_mid_index(pfn); -	idx = p2m_index(pfn); - -	if (p2m_top[topidx][mididx] == p2m_missing) -		return mfn == INVALID_P2M_ENTRY; - -	p2m_top[topidx][mididx][idx] = mfn; - -	return true; -} - -bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ -	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { -		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); -		return true; -	} - -	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  { -		if (!alloc_p2m(pfn)) -			return false; - -		if (!__set_phys_to_machine(pfn, mfn)) -			
return false; -	} - -	return true; -} -  unsigned long arbitrary_virt_to_mfn(void *vaddr)  {  	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); @@ -566,6 +151,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)  	offset = address & ~PAGE_MASK;  	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);  } +EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);  void make_lowmem_page_readonly(void *vaddr)  { @@ -607,21 +193,18 @@ static bool xen_page_pinned(void *ptr)  	return PagePinned(page);  } -static bool xen_iomap_pte(pte_t pte) -{ -	return pte_flags(pte) & _PAGE_IOMAP; -} -  void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)  {  	struct multicall_space mcs;  	struct mmu_update *u; +	trace_xen_mmu_set_domain_pte(ptep, pteval, domid); +  	mcs = xen_mc_entry(sizeof(*u));  	u = mcs.args;  	/* ptep might be kmapped when using 32-bit HIGHPTE */ -	u->ptr = arbitrary_virt_to_machine(ptep).maddr; +	u->ptr = virt_to_machine(ptep).maddr;  	u->val = pte_val_ma(pteval);  	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); @@ -630,11 +213,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)  }  EXPORT_SYMBOL_GPL(xen_set_domain_pte); -static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval) -{ -	xen_set_domain_pte(ptep, pteval, DOMID_IO); -} -  static void xen_extend_mmu_update(const struct mmu_update *update)  {  	struct multicall_space mcs; @@ -643,27 +221,35 @@ static void xen_extend_mmu_update(const struct mmu_update *update)  	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));  	if (mcs.mc != NULL) { -		ADD_STATS(mmu_update_extended, 1); -		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1); -  		mcs.mc->args[1]++; - -		if (mcs.mc->args[1] < MMU_UPDATE_HISTO) -			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1); -		else -			ADD_STATS(mmu_update_histo[0], 1);  	} else { -		ADD_STATS(mmu_update, 1);  		mcs = __xen_mc_entry(sizeof(*u));  		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); -		ADD_STATS(mmu_update_histo[1], 1);  	}  	u = mcs.args;  	*u = *update;  } -void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) +static void xen_extend_mmuext_op(const struct mmuext_op *op) +{ +	struct multicall_space mcs; +	struct mmuext_op *u; + +	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); + +	if (mcs.mc != NULL) { +		mcs.mc->args[1]++; +	} else { +		mcs = __xen_mc_entry(sizeof(*u)); +		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); +	} + +	u = mcs.args; +	*u = *op; +} + +static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)  {  	struct mmu_update u; @@ -676,16 +262,14 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)  	u.val = pmd_val_ma(val);  	xen_extend_mmu_update(&u); -	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); -  	xen_mc_issue(PARAVIRT_LAZY_MMU);  	preempt_enable();  } -void xen_set_pmd(pmd_t *ptr, pmd_t val) +static void xen_set_pmd(pmd_t *ptr, pmd_t val)  { -	ADD_STATS(pmd_update, 1); +	trace_xen_mmu_set_pmd(ptr, val);  	/* If page is not pinned, we can just update the entry  	   directly */ @@ -694,8 +278,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)  		return;  	} -	ADD_STATS(pmd_update_pinned, 1); -  	xen_set_pmd_hyper(ptr, val);  } @@ -708,41 +290,60 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)  	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));  } -void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, -		    pte_t *ptep, pte_t pteval) +static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)  { -	if (xen_iomap_pte(pteval)) { -		xen_set_iomap_pte(ptep, 
pteval); -		goto out; -	} +	struct mmu_update u; -	ADD_STATS(set_pte_at, 1); -//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep)); -	ADD_STATS(set_pte_at_current, mm == current->mm); -	ADD_STATS(set_pte_at_kernel, mm == &init_mm); +	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) +		return false; -	if (mm == current->mm || mm == &init_mm) { -		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { -			struct multicall_space mcs; -			mcs = xen_mc_entry(0); +	xen_mc_batch(); -			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0); -			ADD_STATS(set_pte_at_batched, 1); -			xen_mc_issue(PARAVIRT_LAZY_MMU); -			goto out; -		} else -			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0) -				goto out; +	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; +	u.val = pte_val_ma(pteval); +	xen_extend_mmu_update(&u); + +	xen_mc_issue(PARAVIRT_LAZY_MMU); + +	return true; +} + +static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) +{ +	if (!xen_batched_set_pte(ptep, pteval)) { +		/* +		 * Could call native_set_pte() here and trap and +		 * emulate the PTE write but with 32-bit guests this +		 * needs two traps (one for each of the two 32-bit +		 * words in the PTE) so do one hypercall directly +		 * instead. +		 */ +		struct mmu_update u; + +		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; +		u.val = pte_val_ma(pteval); +		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);  	} -	xen_set_pte(ptep, pteval); +} + +static void xen_set_pte(pte_t *ptep, pte_t pteval) +{ +	trace_xen_mmu_set_pte(ptep, pteval); +	__xen_set_pte(ptep, pteval); +} -out:	return; +static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, +		    pte_t *ptep, pte_t pteval) +{ +	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); +	__xen_set_pte(ptep, pteval);  }  pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,  				 unsigned long addr, pte_t *ptep)  {  	/* Just return the pte as-is.  We preserve the bits on commit */ +	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);  	return *ptep;  } @@ -751,15 +352,13 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,  {  	struct mmu_update u; +	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);  	xen_mc_batch(); -	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; +	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;  	u.val = pte_val_ma(pte);  	xen_extend_mmu_update(&u); -	ADD_STATS(prot_commit, 1); -	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); -  	xen_mc_issue(PARAVIRT_LAZY_MMU);  } @@ -768,8 +367,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)  {  	if (val & _PAGE_PRESENT) {  		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; +		unsigned long pfn = mfn_to_pfn(mfn); +  		pteval_t flags = val & PTE_FLAGS_MASK; -		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; +		if (unlikely(pfn == ~0)) +			val = flags & ~_PAGE_PRESENT; +		else +			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;  	}  	return val; @@ -780,8 +384,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)  	if (val & _PAGE_PRESENT) {  		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;  		pteval_t flags = val & PTE_FLAGS_MASK; -		unsigned long mfn = pfn_to_mfn(pfn); +		unsigned long mfn; +		if (!xen_feature(XENFEAT_auto_translated_physmap)) +			mfn = get_phys_to_machine(pfn); +		else +			mfn = pfn;  		/*  		 * If there's no mfn for the pfn, then just create an  		 * empty non-present pte.  
Unfortunately this loses @@ -791,8 +399,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)  		if (unlikely(mfn == INVALID_P2M_ENTRY)) {  			mfn = 0;  			flags = 0; +		} else { +			/* +			 * Paramount to do this test _after_ the +			 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY & +			 * IDENTITY_FRAME_BIT resolves to true. +			 */ +			mfn &= ~FOREIGN_FRAME_BIT; +			if (mfn & IDENTITY_FRAME_BIT) { +				mfn &= ~IDENTITY_FRAME_BIT; +				flags |= _PAGE_IOMAP; +			}  		} -  		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;  	} @@ -813,16 +431,16 @@ static pteval_t iomap_pte(pteval_t val)  	return val;  } -pteval_t xen_pte_val(pte_t pte) +__visible pteval_t xen_pte_val(pte_t pte)  {  	pteval_t pteval = pte.pte; - +#if 0  	/* If this is a WC pte, convert back from Xen WC to Linux WC */  	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {  		WARN_ON(!pat_enabled);  		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;  	} - +#endif  	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))  		return pteval; @@ -830,7 +448,7 @@ pteval_t xen_pte_val(pte_t pte)  }  PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); -pgdval_t xen_pgd_val(pgd_t pgd) +__visible pgdval_t xen_pgd_val(pgd_t pgd)  {  	return pte_mfn_to_pfn(pgd.pgd);  } @@ -850,8 +468,8 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);   * 3        PCD PWT      UC       UC     UC   * 4    PAT              WB       WC     WB   * 5    PAT     PWT      WC       WP     WT - * 6    PAT PCD          UC-      UC     UC- - * 7    PAT PCD PWT      UC       UC     UC + * 6    PAT PCD          UC-      rsv    UC- + * 7    PAT PCD PWT      UC       rsv    UC   */  void xen_set_pat(u64 pat) @@ -861,10 +479,10 @@ void xen_set_pat(u64 pat)  	WARN_ON(pat != 0x0007010600070106ull);  } -pte_t xen_make_pte(pteval_t pte) +__visible pte_t xen_make_pte(pteval_t pte)  {  	phys_addr_t addr = (pte & PTE_PFN_MASK); - +#if 0  	/* If Linux is trying to set a WC pte, then map to the Xen WC.  	 * If _PAGE_PAT is set, then it probably means it is really  	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope @@ -877,7 +495,7 @@ pte_t xen_make_pte(pteval_t pte)  		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)  			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;  	} - +#endif  	/*  	 * Unprivileged domains are allowed to do IOMAPpings for  	 * PCI passthrough, but not map ISA space.  
The ISA @@ -896,20 +514,20 @@ pte_t xen_make_pte(pteval_t pte)  }  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); -pgd_t xen_make_pgd(pgdval_t pgd) +__visible pgd_t xen_make_pgd(pgdval_t pgd)  {  	pgd = pte_pfn_to_mfn(pgd);  	return native_make_pgd(pgd);  }  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); -pmdval_t xen_pmd_val(pmd_t pmd) +__visible pmdval_t xen_pmd_val(pmd_t pmd)  {  	return pte_mfn_to_pfn(pmd.pmd);  }  PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); -void xen_set_pud_hyper(pud_t *ptr, pud_t val) +static void xen_set_pud_hyper(pud_t *ptr, pud_t val)  {  	struct mmu_update u; @@ -922,16 +540,14 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)  	u.val = pud_val_ma(val);  	xen_extend_mmu_update(&u); -	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); -  	xen_mc_issue(PARAVIRT_LAZY_MMU);  	preempt_enable();  } -void xen_set_pud(pud_t *ptr, pud_t val) +static void xen_set_pud(pud_t *ptr, pud_t val)  { -	ADD_STATS(pud_update, 1); +	trace_xen_mmu_set_pud(ptr, val);  	/* If page is not pinned, we can just update the entry  	   directly */ @@ -940,56 +556,31 @@ void xen_set_pud(pud_t *ptr, pud_t val)  		return;  	} -	ADD_STATS(pud_update_pinned, 1); -  	xen_set_pud_hyper(ptr, val);  } -void xen_set_pte(pte_t *ptep, pte_t pte) -{ -	if (xen_iomap_pte(pte)) { -		xen_set_iomap_pte(ptep, pte); -		return; -	} - -	ADD_STATS(pte_update, 1); -//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); -	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); - -#ifdef CONFIG_X86_PAE -	ptep->pte_high = pte.pte_high; -	smp_wmb(); -	ptep->pte_low = pte.pte_low; -#else -	*ptep = pte; -#endif -} -  #ifdef CONFIG_X86_PAE -void xen_set_pte_atomic(pte_t *ptep, pte_t pte) +static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)  { -	if (xen_iomap_pte(pte)) { -		xen_set_iomap_pte(ptep, pte); -		return; -	} - +	trace_xen_mmu_set_pte_atomic(ptep, pte);  	set_64bit((u64 *)ptep, native_pte_val(pte));  } -void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)  { -	ptep->pte_low = 0; -	smp_wmb();		/* make sure low gets written first */ -	ptep->pte_high = 0; +	trace_xen_mmu_pte_clear(mm, addr, ptep); +	if (!xen_batched_set_pte(ptep, native_make_pte(0))) +		native_pte_clear(mm, addr, ptep);  } -void xen_pmd_clear(pmd_t *pmdp) +static void xen_pmd_clear(pmd_t *pmdp)  { +	trace_xen_mmu_pmd_clear(pmdp);  	set_pmd(pmdp, __pmd(0));  }  #endif	/* CONFIG_X86_PAE */ -pmd_t xen_make_pmd(pmdval_t pmd) +__visible pmd_t xen_make_pmd(pmdval_t pmd)  {  	pmd = pte_pfn_to_mfn(pmd);  	return native_make_pmd(pmd); @@ -997,13 +588,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);  #if PAGETABLE_LEVELS == 4 -pudval_t xen_pud_val(pud_t pud) +__visible pudval_t xen_pud_val(pud_t pud)  {  	return pte_mfn_to_pfn(pud.pud);  }  PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); -pud_t xen_make_pud(pudval_t pud) +__visible pud_t xen_make_pud(pudval_t pud)  {  	pud = pte_pfn_to_mfn(pud); @@ -1011,7 +602,7 @@ pud_t xen_make_pud(pudval_t pud)  }  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); -pgd_t *xen_get_user_pgd(pgd_t *pgd) +static pgd_t *xen_get_user_pgd(pgd_t *pgd)  {  	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);  	unsigned offset = pgd - pgd_page; @@ -1043,7 +634,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)   *  2. It is always pinned   *  3. 
It has no user pagetable attached to it   */ -void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) +static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)  {  	preempt_disable(); @@ -1056,11 +647,11 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)  	preempt_enable();  } -void xen_set_pgd(pgd_t *ptr, pgd_t val) +static void xen_set_pgd(pgd_t *ptr, pgd_t val)  {  	pgd_t *user_ptr = xen_get_user_pgd(ptr); -	ADD_STATS(pgd_update, 1); +	trace_xen_mmu_set_pgd(ptr, user_ptr, val);  	/* If page is not pinned, we can just update the entry  	   directly */ @@ -1073,9 +664,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)  		return;  	} -	ADD_STATS(pgd_update_pinned, 1); -	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); -  	/* If it's pinned, then we can at least batch the kernel and  	   user updates together. */  	xen_mc_batch(); @@ -1208,8 +796,8 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)  {  	spinlock_t *ptl = NULL; -#if USE_SPLIT_PTLOCKS -	ptl = __pte_lockptr(page); +#if USE_SPLIT_PTE_PTLOCKS +	ptl = ptlock_ptr(page);  	spin_lock_nest_lock(ptl, &mm->page_table_lock);  #endif @@ -1224,14 +812,12 @@ static void xen_pte_unlock(void *v)  static void xen_do_pin(unsigned level, unsigned long pfn)  { -	struct mmuext_op *op; -	struct multicall_space mcs; +	struct mmuext_op op; -	mcs = __xen_mc_entry(sizeof(*op)); -	op = mcs.args; -	op->cmd = level; -	op->arg1.mfn = pfn_to_mfn(pfn); -	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); +	op.cmd = level; +	op.arg1.mfn = pfn_to_mfn(pfn); + +	xen_extend_mmuext_op(&op);  }  static int xen_pin_page(struct mm_struct *mm, struct page *page, @@ -1299,6 +885,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,     read-only, and can be pinned. */  static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)  { +	trace_xen_mmu_pgd_pin(mm, pgd); +  	xen_mc_batch();  	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { @@ -1350,10 +938,9 @@ static void xen_pgd_pin(struct mm_struct *mm)   */  void xen_mm_pin_all(void)  { -	unsigned long flags;  	struct page *page; -	spin_lock_irqsave(&pgd_lock, flags); +	spin_lock(&pgd_lock);  	list_for_each_entry(page, &pgd_list, lru) {  		if (!PagePinned(page)) { @@ -1362,7 +949,7 @@ void xen_mm_pin_all(void)  		}  	} -	spin_unlock_irqrestore(&pgd_lock, flags); +	spin_unlock(&pgd_lock);  }  /* @@ -1370,7 +957,7 @@ void xen_mm_pin_all(void)   * that's before we have page structures to store the bits.  So do all   * the book-keeping now.   
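xen_extend_mmu_update(), the new xen_extend_mmuext_op() above, and the reworked xen_do_pin() that now calls the latter all share one pattern: if the current multicall batch already ends in the matching hypercall, bump its argument count and append; otherwise start a new multicall entry. A simplified user-space model of that append-or-start logic (the struct layout and the extend_or_add() name are made up for illustration, not the kernel's multicall API):

#include <stdio.h>

#define HYPERCALL_MMU_UPDATE	1
#define HYPERCALL_MMUEXT_OP	2

struct op { unsigned long ptr, val; };

struct call {
	int op_kind;		/* which hypercall this entry issues */
	unsigned nargs;		/* how many struct op it carries     */
	struct op args[8];
};

static struct call batch[16];
static unsigned batch_len;

/* Append to the last entry if it is the same hypercall and has room,
 * otherwise open a new entry: the shape of xen_extend_mmu_update(). */
static void extend_or_add(int op_kind, struct op u)
{
	struct call *last = batch_len ? &batch[batch_len - 1] : NULL;

	if (last && last->op_kind == op_kind && last->nargs < 8) {
		last->args[last->nargs++] = u;	/* mcs.mc->args[1]++ */
		return;
	}
	batch[batch_len++] = (struct call){ .op_kind = op_kind,
					    .nargs = 1, .args = { u } };
}

int main(void)
{
	extend_or_add(HYPERCALL_MMU_UPDATE, (struct op){ 0x1000, 1 });
	extend_or_add(HYPERCALL_MMU_UPDATE, (struct op){ 0x2000, 2 });
	extend_or_add(HYPERCALL_MMUEXT_OP,  (struct op){ 0x3000, 3 });

	printf("%u multicall entries for 3 updates\n", batch_len);	/* prints 2 */
	return 0;
}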
*/ -static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, +static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,  				  enum pt_level level)  {  	SetPagePinned(page); @@ -1425,6 +1012,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,  /* Release a pagetables pages back as normal RW */  static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)  { +	trace_xen_mmu_pgd_unpin(mm, pgd); +  	xen_mc_batch();  	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); @@ -1463,10 +1052,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)   */  void xen_mm_unpin_all(void)  { -	unsigned long flags;  	struct page *page; -	spin_lock_irqsave(&pgd_lock, flags); +	spin_lock(&pgd_lock);  	list_for_each_entry(page, &pgd_list, lru) {  		if (PageSavePinned(page)) { @@ -1476,17 +1064,17 @@ void xen_mm_unpin_all(void)  		}  	} -	spin_unlock_irqrestore(&pgd_lock, flags); +	spin_unlock(&pgd_lock);  } -void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) +static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)  {  	spin_lock(&next->page_table_lock);  	xen_pgd_pin(next);  	spin_unlock(&next->page_table_lock);  } -void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)  {  	spin_lock(&mm->page_table_lock);  	xen_pgd_pin(mm); @@ -1502,14 +1090,14 @@ static void drop_other_mm_ref(void *info)  	struct mm_struct *mm = info;  	struct mm_struct *active_mm; -	active_mm = percpu_read(cpu_tlbstate.active_mm); +	active_mm = this_cpu_read(cpu_tlbstate.active_mm); -	if (active_mm == mm) +	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)  		leave_mm(smp_processor_id());  	/* If this cpu still has a stale cr3 reference, then make sure  	   it has been flushed. */ -	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) +	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))  		load_cr3(swapper_pg_dir);  } @@ -1573,7 +1161,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)   * pagetable because of lazy tlb flushing.  This means we need need to   * switch all CPUs off this pagetable before we can unpin it.   */ -void xen_exit_mmap(struct mm_struct *mm) +static void xen_exit_mmap(struct mm_struct *mm)  {  	get_cpu();		/* make sure we don't move around */  	xen_drop_mm_ref(mm); @@ -1588,38 +1176,134 @@ void xen_exit_mmap(struct mm_struct *mm)  	spin_unlock(&mm->page_table_lock);  } -static __init void xen_pagetable_setup_start(pgd_t *base) +static void xen_post_allocator_init(void); + +#ifdef CONFIG_X86_64 +static void __init xen_cleanhighmap(unsigned long vaddr, +				    unsigned long vaddr_end)  { +	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; +	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr); + +	/* NOTE: The loop is more greedy than the cleanup_highmap variant. +	 * We include the PMD passed in on _both_ boundaries. */ +	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); +			pmd++, vaddr += PMD_SIZE) { +		if (pmd_none(*pmd)) +			continue; +		if (vaddr < (unsigned long) _text || vaddr > kernel_end) +			set_pmd(pmd, __pmd(0)); +	} +	/* In case we did something silly, we should crash in this function +	 * instead of somewhere later and be confusing. 
*/ +	xen_mc_flush();  } +static void __init xen_pagetable_p2m_copy(void) +{ +	unsigned long size; +	unsigned long addr; +	unsigned long new_mfn_list; -static void xen_post_allocator_init(void); +	if (xen_feature(XENFEAT_auto_translated_physmap)) +		return; + +	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); + +	new_mfn_list = xen_revector_p2m_tree(); +	/* No memory or already called. */ +	if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list) +		return; + +	/* using __ka address and sticking INVALID_P2M_ENTRY! */ +	memset((void *)xen_start_info->mfn_list, 0xff, size); + +	/* We should be in __ka space. */ +	BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map); +	addr = xen_start_info->mfn_list; +	/* We roundup to the PMD, which means that if anybody at this stage is +	 * using the __ka address of xen_start_info or xen_start_info->shared_info +	 * they are in going to crash. Fortunatly we have already revectored +	 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */ +	size = roundup(size, PMD_SIZE); +	xen_cleanhighmap(addr, addr + size); + +	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); +	memblock_free(__pa(xen_start_info->mfn_list), size); +	/* And revector! Bye bye old array */ +	xen_start_info->mfn_list = new_mfn_list; + +	/* At this stage, cleanup_highmap has already cleaned __ka space +	 * from _brk_limit way up to the max_pfn_mapped (which is the end of +	 * the ramdisk). We continue on, erasing PMD entries that point to page +	 * tables - do note that they are accessible at this stage via __va. +	 * For good measure we also round up to the PMD - which means that if +	 * anybody is using __ka address to the initial boot-stack - and try +	 * to use it - they are going to crash. The xen_start_info has been +	 * taken care of already in xen_setup_kernel_pagetable. */ +	addr = xen_start_info->pt_base; +	size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); + +	xen_cleanhighmap(addr, addr + size); +	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); +#ifdef DEBUG +	/* This is superflous and is not neccessary, but you know what +	 * lets do it. The MODULES_VADDR -> MODULES_END should be clear of +	 * anything at this stage. 
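The new xen_cleanhighmap() above steps through level2_kernel_pgt in PMD_SIZE increments and, as its comment says, is inclusive on both boundaries; entries outside the window that must stay mapped are zapped. A tiny model of that inclusive walk over a plain array (the array and the keep window are invented for illustration):

#include <stdio.h>

#define PMD_SIZE	0x200000UL	/* 2 MB on x86-64 */
#define NPMD		8

static unsigned long pmd[NPMD];		/* stand-in for PMD entries */

static void clean_range(unsigned long vaddr, unsigned long vaddr_end,
			unsigned long keep_lo, unsigned long keep_hi)
{
	unsigned i = 0;

	/* Note "<=": both the starting and the ending PMD are visited. */
	for (; vaddr <= vaddr_end && i < NPMD; i++, vaddr += PMD_SIZE) {
		if (!pmd[i])
			continue;
		if (vaddr < keep_lo || vaddr > keep_hi)
			pmd[i] = 0;	/* models set_pmd(pmd, __pmd(0)) */
	}
}

int main(void)
{
	for (unsigned i = 0; i < NPMD; i++)
		pmd[i] = 1;

	/* Keep only the window covering entries 2..4. */
	clean_range(0, (NPMD - 1) * PMD_SIZE, 2 * PMD_SIZE, 4 * PMD_SIZE);

	for (unsigned i = 0; i < NPMD; i++)
		printf("%lu", pmd[i]);	/* prints 00111000 */
	printf("\n");
	return 0;
}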
*/ +	xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); +#endif +} +#endif -static __init void xen_pagetable_setup_done(pgd_t *base) +static void __init xen_pagetable_init(void)  { +	paging_init();  	xen_setup_shared_info(); +#ifdef CONFIG_X86_64 +	xen_pagetable_p2m_copy(); +#endif  	xen_post_allocator_init();  } -  static void xen_write_cr2(unsigned long cr2)  { -	percpu_read(xen_vcpu)->arch.cr2 = cr2; +	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;  }  static unsigned long xen_read_cr2(void)  { -	return percpu_read(xen_vcpu)->arch.cr2; +	return this_cpu_read(xen_vcpu)->arch.cr2;  }  unsigned long xen_read_cr2_direct(void)  { -	return percpu_read(xen_vcpu_info.arch.cr2); +	return this_cpu_read(xen_vcpu_info.arch.cr2);  } +void xen_flush_tlb_all(void) +{ +	struct mmuext_op *op; +	struct multicall_space mcs; + +	trace_xen_mmu_flush_tlb_all(0); + +	preempt_disable(); + +	mcs = xen_mc_entry(sizeof(*op)); + +	op = mcs.args; +	op->cmd = MMUEXT_TLB_FLUSH_ALL; +	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + +	xen_mc_issue(PARAVIRT_LAZY_MMU); + +	preempt_enable(); +}  static void xen_flush_tlb(void)  {  	struct mmuext_op *op;  	struct multicall_space mcs; +	trace_xen_mmu_flush_tlb(0); +  	preempt_disable();  	mcs = xen_mc_entry(sizeof(*op)); @@ -1638,6 +1322,8 @@ static void xen_flush_tlb_single(unsigned long addr)  	struct mmuext_op *op;  	struct multicall_space mcs; +	trace_xen_mmu_flush_tlb_single(addr); +  	preempt_disable();  	mcs = xen_mc_entry(sizeof(*op)); @@ -1652,14 +1338,21 @@ static void xen_flush_tlb_single(unsigned long addr)  }  static void xen_flush_tlb_others(const struct cpumask *cpus, -				 struct mm_struct *mm, unsigned long va) +				 struct mm_struct *mm, unsigned long start, +				 unsigned long end)  {  	struct {  		struct mmuext_op op; +#ifdef CONFIG_SMP +		DECLARE_BITMAP(mask, num_processors); +#else  		DECLARE_BITMAP(mask, NR_CPUS); +#endif  	} *args;  	struct multicall_space mcs; +	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end); +  	if (cpumask_empty(cpus))  		return;		/* nothing to do */ @@ -1671,11 +1364,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,  	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);  	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); -	if (va == TLB_FLUSH_ALL) { -		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; -	} else { +	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; +	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {  		args->op.cmd = MMUEXT_INVLPG_MULTI; -		args->op.arg1.linear_addr = va; +		args->op.arg1.linear_addr = start;  	}  	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); @@ -1685,20 +1377,21 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,  static unsigned long xen_read_cr3(void)  { -	return percpu_read(xen_cr3); +	return this_cpu_read(xen_cr3);  }  static void set_current_cr3(void *v)  { -	percpu_write(xen_current_cr3, (unsigned long)v); +	this_cpu_write(xen_current_cr3, (unsigned long)v);  }  static void __xen_write_cr3(bool kernel, unsigned long cr3)  { -	struct mmuext_op *op; -	struct multicall_space mcs; +	struct mmuext_op op;  	unsigned long mfn; +	trace_xen_mmu_write_cr3(kernel, cr3); +  	if (cr3)  		mfn = pfn_to_mfn(PFN_DOWN(cr3));  	else @@ -1706,23 +1399,19 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)  	WARN_ON(mfn == 0 && kernel); -	mcs = __xen_mc_entry(sizeof(*op)); - -	op = mcs.args; -	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; -	op->arg1.mfn = mfn; +	op.cmd = kernel ? 
MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; +	op.arg1.mfn = mfn; -	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); +	xen_extend_mmuext_op(&op);  	if (kernel) { -		percpu_write(xen_cr3, cr3); +		this_cpu_write(xen_cr3, cr3);  		/* Update xen_current_cr3 once the batch has actually  		   been submitted. */  		xen_mc_callback(set_current_cr3, (void *)cr3);  	}  } -  static void xen_write_cr3(unsigned long cr3)  {  	BUG_ON(preemptible()); @@ -1731,7 +1420,7 @@ static void xen_write_cr3(unsigned long cr3)  	/* Update while interrupts are disabled, so its atomic with  	   respect to ipis */ -	percpu_write(xen_cr3, cr3); +	this_cpu_write(xen_cr3, cr3);  	__xen_write_cr3(true, cr3); @@ -1748,6 +1437,43 @@ static void xen_write_cr3(unsigned long cr3)  	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */  } +#ifdef CONFIG_X86_64 +/* + * At the start of the day - when Xen launches a guest, it has already + * built pagetables for the guest. We diligently look over them + * in xen_setup_kernel_pagetable and graft as appropiate them in the + * init_level4_pgt and its friends. Then when we are happy we load + * the new init_level4_pgt - and continue on. + * + * The generic code starts (start_kernel) and 'init_mem_mapping' sets + * up the rest of the pagetables. When it has completed it loads the cr3. + * N.B. that baremetal would start at 'start_kernel' (and the early + * #PF handler would create bootstrap pagetables) - so we are running + * with the same assumptions as what to do when write_cr3 is executed + * at this point. + * + * Since there are no user-page tables at all, we have two variants + * of xen_write_cr3 - the early bootup (this one), and the late one + * (xen_write_cr3). The reason we have to do that is that in 64-bit + * the Linux kernel and user-space are both in ring 3 while the + * hypervisor is in ring 0. + */ +static void __init xen_write_cr3_init(unsigned long cr3) +{ +	BUG_ON(preemptible()); + +	xen_mc_batch();  /* disables interrupts */ + +	/* Update while interrupts are disabled, so its atomic with +	   respect to ipis */ +	this_cpu_write(xen_cr3, cr3); + +	__xen_write_cr3(true, cr3); + +	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */ +} +#endif +  static int xen_pgd_alloc(struct mm_struct *mm)  {  	pgd_t *pgd = mm->pgd; @@ -1768,7 +1494,7 @@ static int xen_pgd_alloc(struct mm_struct *mm)  		page->private = (unsigned long)user_pgd;  		if (user_pgd != NULL) { -			user_pgd[pgd_index(VSYSCALL_START)] = +			user_pgd[pgd_index(VSYSCALL_ADDR)] =  				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);  			ret = 0;  		} @@ -1790,36 +1516,45 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)  #endif  } -static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) -{ -	unsigned long pfn = pte_pfn(pte); -  #ifdef CONFIG_X86_32 +static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +{  	/* If there's an existing pte, then don't allow _PAGE_RW to be set */  	if (pte_val_ma(*ptep) & _PAGE_PRESENT)  		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &  			       pte_val_ma(pte)); -#endif - -	/* -	 * If the new pfn is within the range of the newly allocated -	 * kernel pagetable, and it isn't being mapped into an -	 * early_ioremap fixmap slot, make sure it is RO. 
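The reworked xen_flush_tlb_others() above now takes a (start, end) range rather than a single address and only downgrades from MMUEXT_TLB_FLUSH_MULTI to MMUEXT_INVLPG_MULTI when the range spans at most one page. A sketch of just that decision, with pick_flush_op() as an illustrative name:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TLB_FLUSH_ALL	(~0UL)

enum { FLUSH_MULTI, INVLPG_MULTI };

/* Pick the cheaper remote-flush op, as the reworked xen_flush_tlb_others() does. */
static int pick_flush_op(unsigned long start, unsigned long end)
{
	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE)
		return INVLPG_MULTI;	/* single page: targeted invlpg */
	return FLUSH_MULTI;		/* anything bigger: full flush  */
}

int main(void)
{
	printf("%d\n", pick_flush_op(0x1000, 0x2000));		/* INVLPG_MULTI */
	printf("%d\n", pick_flush_op(0x1000, 0x100000));	/* FLUSH_MULTI  */
	printf("%d\n", pick_flush_op(0, TLB_FLUSH_ALL));	/* FLUSH_MULTI  */
	return 0;
}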
-	 */ -	if (!is_early_ioremap_ptep(ptep) && -	    pfn >= e820_table_start && pfn < e820_table_end) -		pte = pte_wrprotect(pte);  	return pte;  } +#else /* CONFIG_X86_64 */ +static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +{ +	return pte; +} +#endif /* CONFIG_X86_64 */ -/* Init-time set_pte while constructing initial pagetables, which -   doesn't allow RO pagetable pages to be remapped RW */ -static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) +/* + * Init-time set_pte while constructing initial pagetables, which + * doesn't allow RO page table pages to be remapped RW. + * + * If there is no MFN for this PFN then this page is initially + * ballooned out so clear the PTE (as in decrease_reservation() in + * drivers/xen/balloon.c). + * + * Many of these PTE updates are done on unpinned and writable pages + * and doing a hypercall for these is unnecessary and expensive.  At + * this point it is not possible to tell if a page is pinned or not, + * so always write the PTE directly and rely on Xen trapping and + * emulating any updates as necessary. + */ +static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)  { -	pte = mask_rw_pte(ptep, pte); +	if (pte_mfn(pte) != INVALID_P2M_ENTRY) +		pte = mask_rw_pte(ptep, pte); +	else +		pte = __pte_ma(0); -	xen_set_pte(ptep, pte); +	native_set_pte(ptep, pte);  }  static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) @@ -1833,7 +1568,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)  /* Early in boot, while setting up the initial pagetable, assume     everything is pinned. */ -static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) +static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)  {  #ifdef CONFIG_FLATMEM  	BUG_ON(mem_map);	/* should only be used early */ @@ -1843,7 +1578,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)  }  /* Used for pmd and pud */ -static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) +static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)  {  #ifdef CONFIG_FLATMEM  	BUG_ON(mem_map);	/* should only be used early */ @@ -1853,30 +1588,63 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)  /* Early release_pte assumes that all pts are pinned, since there's     only init_mm and anything attached to that is pinned. */ -static __init void xen_release_pte_init(unsigned long pfn) +static void __init xen_release_pte_init(unsigned long pfn)  {  	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);  	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));  } -static __init void xen_release_pmd_init(unsigned long pfn) +static void __init xen_release_pmd_init(unsigned long pfn)  {  	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));  } +static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ +	struct multicall_space mcs; +	struct mmuext_op *op; + +	mcs = __xen_mc_entry(sizeof(*op)); +	op = mcs.args; +	op->cmd = cmd; +	op->arg1.mfn = pfn_to_mfn(pfn); + +	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); +} + +static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) +{ +	struct multicall_space mcs; +	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); + +	mcs = __xen_mc_entry(0); +	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, +				pfn_pte(pfn, prot), 0); +} +  /* This needs to make sure the new pte page is pinned iff its being     attached to a pinned pagetable. 
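xen_set_pte_init() above now writes boot-time PTEs natively, clears the PTE entirely when the pfn has no MFN (the page was ballooned out), and on 32-bit still relies on mask_rw_pte()'s bit trick to avoid re-adding _PAGE_RW over an existing read-only mapping. A user-space model of those two decisions (the helper names are illustrative; the flag values are the usual x86 ones):

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define INVALID_PFN	(~0UL)

/* Model of the 32-bit mask_rw_pte() trick: the new PTE may keep
 * _PAGE_RW only if the existing, already-present PTE had it. */
static unsigned long mask_rw(unsigned long old, unsigned long new)
{
	if (old & _PAGE_PRESENT)
		new &= (old & _PAGE_RW) | ~_PAGE_RW;
	return new;
}

/* Model of xen_set_pte_init(): a pfn with no MFN (ballooned out)
 * gets a cleared PTE instead of a translated one. */
static unsigned long set_pte_init(unsigned long old, unsigned long new,
				  unsigned long mfn)
{
	if (mfn == INVALID_PFN)
		return 0;
	return mask_rw(old, new);
}

int main(void)
{
	/* Existing read-only mapping: RW must not be re-added. */
	printf("%#lx\n", set_pte_init(_PAGE_PRESENT, _PAGE_PRESENT | _PAGE_RW, 5));
	/* Ballooned-out page: the PTE is cleared. */
	printf("%#lx\n", set_pte_init(0, _PAGE_PRESENT | _PAGE_RW, INVALID_PFN));
	return 0;
}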
*/ -static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) +static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, +				    unsigned level)  { -	struct page *page = pfn_to_page(pfn); +	bool pinned = PagePinned(virt_to_page(mm->pgd)); + +	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); + +	if (pinned) { +		struct page *page = pfn_to_page(pfn); -	if (PagePinned(virt_to_page(mm->pgd))) {  		SetPagePinned(page);  		if (!PageHighMem(page)) { -			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); -			if (level == PT_PTE && USE_SPLIT_PTLOCKS) -				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); +			xen_mc_batch(); + +			__set_pfn_prot(pfn, PAGE_KERNEL_RO); + +			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) +				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + +			xen_mc_issue(PARAVIRT_LAZY_MMU);  		} else {  			/* make sure there are no stray mappings of  			   this page */ @@ -1896,15 +1664,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)  }  /* This should never happen until we're OK to use struct page */ -static void xen_release_ptpage(unsigned long pfn, unsigned level) +static inline void xen_release_ptpage(unsigned long pfn, unsigned level)  {  	struct page *page = pfn_to_page(pfn); +	bool pinned = PagePinned(page); -	if (PagePinned(page)) { +	trace_xen_mmu_release_ptpage(pfn, level, pinned); + +	if (pinned) {  		if (!PageHighMem(page)) { -			if (level == PT_PTE && USE_SPLIT_PTLOCKS) -				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); -			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); +			xen_mc_batch(); + +			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) +				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + +			__set_pfn_prot(pfn, PAGE_KERNEL); + +			xen_mc_issue(PARAVIRT_LAZY_MMU);  		}  		ClearPagePinned(page);  	} @@ -1976,16 +1752,24 @@ static void *m2v(phys_addr_t maddr)  }  /* Set the page permissions on an identity-mapped pages */ -static void set_page_prot(void *addr, pgprot_t prot) +static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)  {  	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;  	pte_t pte = pfn_pte(pfn, prot); -	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) +	/* For PVH no need to set R/O or R/W to pin them or unpin them. 
*/ +	if (xen_feature(XENFEAT_auto_translated_physmap)) +		return; + +	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))  		BUG();  } - -static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) +static void set_page_prot(void *addr, pgprot_t prot) +{ +	return set_page_prot_flags(addr, prot, UVMF_NONE); +} +#ifdef CONFIG_X86_32 +static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)  {  	unsigned pmdidx, pteidx;  	unsigned ident_pte; @@ -2017,8 +1801,10 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)  		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {  			pte_t pte; +#ifdef CONFIG_X86_32  			if (pfn > max_pfn_mapped)  				max_pfn_mapped = pfn; +#endif  			if (!pte_none(pte_page[pteidx]))  				continue; @@ -2033,6 +1819,22 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)  	set_page_prot(pmd, PAGE_KERNEL_RO);  } +#endif +void __init xen_setup_machphys_mapping(void) +{ +	struct xen_machphys_mapping mapping; + +	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { +		machine_to_phys_mapping = (unsigned long *)mapping.v_start; +		machine_to_phys_nr = mapping.max_mfn + 1; +	} else { +		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; +	} +#ifdef CONFIG_X86_32 +	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) +		< machine_to_phys_mapping); +#endif +}  #ifdef CONFIG_X86_64  static void convert_pfn_mfn(void *v) @@ -2045,9 +1847,22 @@ static void convert_pfn_mfn(void *v)  	for (i = 0; i < PTRS_PER_PTE; i++)  		pte[i] = xen_make_pte(pte[i].pte);  } - +static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, +				 unsigned long addr) +{ +	if (*pt_base == PFN_DOWN(__pa(addr))) { +		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); +		clear_page((void *)addr); +		(*pt_base)++; +	} +	if (*pt_end == PFN_DOWN(__pa(addr))) { +		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); +		clear_page((void *)addr); +		(*pt_end)--; +	} +}  /* - * Set up the inital kernel pagetable. + * Set up the initial kernel pagetable.   *   * We can construct this by grafting the Xen provided pagetable into   * head_64.S's preconstructed pagetables.  We copy the Xen L2's into @@ -2056,107 +1871,177 @@ static void convert_pfn_mfn(void *v)   * but that's enough to get __va working.  We need to fill in the rest   * of the physical mapping once some sort of allocator has been set   * up. + * NOTE: for PVH, the page tables are native.   */ -__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, -					 unsigned long max_pfn) +void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)  {  	pud_t *l3;  	pmd_t *l2; +	unsigned long addr[3]; +	unsigned long pt_base, pt_end; +	unsigned i; + +	/* max_pfn_mapped is the last pfn mapped in the initial memory +	 * mappings. Considering that on Xen after the kernel mappings we +	 * have the mappings of some pages that don't exist in pfn space, we +	 * set max_pfn_mapped to the last real pfn mapped. 
*/ +	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + +	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); +	pt_end = pt_base + xen_start_info->nr_pt_frames;  	/* Zap identity mapping */  	init_level4_pgt[0] = __pgd(0); -	/* Pre-constructed entries are in pfn, so convert to mfn */ -	convert_pfn_mfn(init_level4_pgt); -	convert_pfn_mfn(level3_ident_pgt); -	convert_pfn_mfn(level3_kernel_pgt); - +	if (!xen_feature(XENFEAT_auto_translated_physmap)) { +		/* Pre-constructed entries are in pfn, so convert to mfn */ +		/* L4[272] -> level3_ident_pgt +		 * L4[511] -> level3_kernel_pgt */ +		convert_pfn_mfn(init_level4_pgt); + +		/* L3_i[0] -> level2_ident_pgt */ +		convert_pfn_mfn(level3_ident_pgt); +		/* L3_k[510] -> level2_kernel_pgt +		 * L3_i[511] -> level2_fixmap_pgt */ +		convert_pfn_mfn(level3_kernel_pgt); +	} +	/* We get [511][511] and have Xen's version of level2_kernel_pgt */  	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);  	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); -	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); -	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); - +	addr[0] = (unsigned long)pgd; +	addr[1] = (unsigned long)l3; +	addr[2] = (unsigned long)l2; +	/* Graft it onto L4[272][0]. Note that we creating an aliasing problem: +	 * Both L4[272][0] and L4[511][511] have entries that point to the same +	 * L2 (PMD) tables. Meaning that if you modify it in __va space +	 * it will be also modified in the __ka space! (But if you just +	 * modify the PMD table to point to other PTE's or none, then you +	 * are OK - which is what cleanup_highmap does) */ +	copy_page(level2_ident_pgt, l2); +	/* Graft it onto L4[511][511] */ +	copy_page(level2_kernel_pgt, l2); + +	/* Get [511][510] and graft that in level2_fixmap_pgt */  	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);  	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); -	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); - -	/* Set up identity map */ -	xen_map_identity_early(level2_ident_pgt, max_pfn); +	copy_page(level2_fixmap_pgt, l2); +	/* Note that we don't do anything with level1_fixmap_pgt which +	 * we don't need. */ +	if (!xen_feature(XENFEAT_auto_translated_physmap)) { +		/* Make pagetable pieces RO */ +		set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); +		set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); +		set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); +		set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); +		set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); +		set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); +		set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + +		/* Pin down new L4 */ +		pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, +				  PFN_DOWN(__pa_symbol(init_level4_pgt))); + +		/* Unpin Xen-provided one */ +		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); -	/* Make pagetable pieces RO */ -	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); -	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); -	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); -	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); -	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); -	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); +		/* +		 * At this stage there can be no user pgd, and no page +		 * structure to attach it to, so make sure we just set kernel +		 * pgd. 
+		 */ +		xen_mc_batch(); +		__xen_write_cr3(true, __pa(init_level4_pgt)); +		xen_mc_issue(PARAVIRT_LAZY_CPU); +	} else +		native_write_cr3(__pa(init_level4_pgt)); + +	/* We can't that easily rip out L3 and L2, as the Xen pagetables are +	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for +	 * the initial domain. For guests using the toolstack, they are in: +	 * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only +	 * rip out the [L4] (pgd), but for guests we shave off three pages. +	 */ +	for (i = 0; i < ARRAY_SIZE(addr); i++) +		check_pt_base(&pt_base, &pt_end, addr[i]); -	/* Pin down new L4 */ -	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, -			  PFN_DOWN(__pa_symbol(init_level4_pgt))); +	/* Our (by three pages) smaller Xen pagetable that we are using */ +	memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE); +	/* Revector the xen_start_info */ +	xen_start_info = (struct start_info *)__va(__pa(xen_start_info)); +} +#else	/* !CONFIG_X86_64 */ +static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); +static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); -	/* Unpin Xen-provided one */ -	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); +static void __init xen_write_cr3_init(unsigned long cr3) +{ +	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); -	/* Switch over */ -	pgd = init_level4_pgt; +	BUG_ON(read_cr3() != __pa(initial_page_table)); +	BUG_ON(cr3 != __pa(swapper_pg_dir));  	/* -	 * At this stage there can be no user pgd, and no page -	 * structure to attach it to, so make sure we just set kernel -	 * pgd. +	 * We are switching to swapper_pg_dir for the first time (from +	 * initial_page_table) and therefore need to mark that page +	 * read-only and then pin it. +	 * +	 * Xen disallows sharing of kernel PMDs for PAE +	 * guests. Therefore we must copy the kernel PMD from +	 * initial_page_table into a new kernel PMD to be used in +	 * swapper_pg_dir.  	 
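check_pt_base(), added above and applied to the pgd/l3/l2 frames near the end of xen_setup_kernel_pagetable(), trims the Xen-provided pagetable reservation from either end when one of those frames sits exactly on a boundary; as the comment notes, toolstack-built guests shave three pages off the front this way. A small model of the trimming with made-up frame numbers:

#include <stdio.h>

/* Same shape as check_pt_base(): reclaim addr's frame if it is the
 * first or the last frame of the range about to be reserved. */
static void trim(unsigned long *pt_base, unsigned long *pt_end,
		 unsigned long frame)
{
	if (*pt_base == frame)
		(*pt_base)++;
	if (*pt_end == frame)
		(*pt_end)--;
}

int main(void)
{
	/* Hypothetical: 10 pagetable frames starting at pfn 100,
	 * laid out [L4][L3][L2][L1]... as for a toolstack-built guest. */
	unsigned long pt_base = 100, pt_end = 100 + 10;
	unsigned long addr[3] = { 100, 101, 102 };	/* pgd, l3, l2 */

	for (int i = 0; i < 3; i++)
		trim(&pt_base, &pt_end, addr[i]);

	/* memblock_reserve() then covers only frames [103, 110). */
	printf("reserve [%lu, %lu)\n", pt_base, pt_end);
	return 0;
}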
*/ -	xen_mc_batch(); -	__xen_write_cr3(true, __pa(pgd)); -	xen_mc_issue(PARAVIRT_LAZY_CPU); +	swapper_kernel_pmd = +		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); +	copy_page(swapper_kernel_pmd, initial_kernel_pmd); +	swapper_pg_dir[KERNEL_PGD_BOUNDARY] = +		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); +	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); + +	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); +	xen_write_cr3(cr3); +	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); -	memblock_x86_reserve_range(__pa(xen_start_info->pt_base), -		      __pa(xen_start_info->pt_base + -			   xen_start_info->nr_pt_frames * PAGE_SIZE), -		      "XEN PAGETABLES"); +	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, +			  PFN_DOWN(__pa(initial_page_table))); +	set_page_prot(initial_page_table, PAGE_KERNEL); +	set_page_prot(initial_kernel_pmd, PAGE_KERNEL); -	return pgd; +	pv_mmu_ops.write_cr3 = &xen_write_cr3;  } -#else	/* !CONFIG_X86_64 */ -static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); -__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, -					 unsigned long max_pfn) +void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)  {  	pmd_t *kernel_pmd; -	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); +	initial_kernel_pmd = +		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);  	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +  				  xen_start_info->nr_pt_frames * PAGE_SIZE +  				  512*1024);  	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); -	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); +	copy_page(initial_kernel_pmd, kernel_pmd); -	xen_map_identity_early(level2_kernel_pgt, max_pfn); +	xen_map_identity_early(initial_kernel_pmd, max_pfn); -	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); -	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], -			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); +	copy_page(initial_page_table, pgd); +	initial_page_table[KERNEL_PGD_BOUNDARY] = +		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); -	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); -	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); +	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); +	set_page_prot(initial_page_table, PAGE_KERNEL_RO);  	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);  	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); -	xen_write_cr3(__pa(swapper_pg_dir)); - -	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); - -	memblock_x86_reserve_range(__pa(xen_start_info->pt_base), -		      __pa(xen_start_info->pt_base + -			   xen_start_info->nr_pt_frames * PAGE_SIZE), -		      "XEN PAGETABLES"); +	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, +			  PFN_DOWN(__pa(initial_page_table))); +	xen_write_cr3(__pa(initial_page_table)); -	return swapper_pg_dir; +	memblock_reserve(__pa(xen_start_info->pt_base), +			 xen_start_info->nr_pt_frames * PAGE_SIZE);  }  #endif	/* CONFIG_X86_64 */ @@ -2170,17 +2055,14 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)  	switch (idx) {  	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: -#ifdef CONFIG_X86_F00F_BUG -	case FIX_F00F_IDT: -#endif +	case FIX_RO_IDT:  #ifdef CONFIG_X86_32  	case FIX_WP_TEST: -	case FIX_VDSO:  # ifdef CONFIG_HIGHMEM  	case FIX_KMAP_BEGIN ... FIX_KMAP_END:  # endif  #else -	case VSYSCALL_LAST_PAGE ... 
VSYSCALL_FIRST_PAGE: +	case VSYSCALL_PAGE:  #endif  	case FIX_TEXT_POKE0:  	case FIX_TEXT_POKE1: @@ -2221,38 +2103,18 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)  #ifdef CONFIG_X86_64  	/* Replicate changes to map the vsyscall page into the user  	   pagetable vsyscall mapping. */ -	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { +	if (idx == VSYSCALL_PAGE) {  		unsigned long vaddr = __fix_to_virt(idx);  		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);  	}  #endif  } -__init void xen_ident_map_ISA(void) +static void __init xen_post_allocator_init(void)  { -	unsigned long pa; - -	/* -	 * If we're dom0, then linear map the ISA machine addresses into -	 * the kernel's address space. -	 */ -	if (!xen_initial_domain()) +	if (xen_feature(XENFEAT_auto_translated_physmap))  		return; -	xen_raw_printk("Xen: setup ISA identity maps\n"); - -	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { -		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); - -		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) -			BUG(); -	} - -	xen_flush_tlb(); -} - -static __init void xen_post_allocator_init(void) -{  	pv_mmu_ops.set_pte = xen_set_pte;  	pv_mmu_ops.set_pmd = xen_set_pmd;  	pv_mmu_ops.set_pud = xen_set_pud; @@ -2272,6 +2134,7 @@ static __init void xen_post_allocator_init(void)  #endif  #ifdef CONFIG_X86_64 +	pv_mmu_ops.write_cr3 = &xen_write_cr3;  	SetPagePinned(virt_to_page(level3_user_vsyscall));  #endif  	xen_mark_init_mm_pinned(); @@ -2285,12 +2148,12 @@ static void xen_leave_lazy_mmu(void)  	preempt_enable();  } -static const struct pv_mmu_ops xen_mmu_ops __initdata = { +static const struct pv_mmu_ops xen_mmu_ops __initconst = {  	.read_cr2 = xen_read_cr2,  	.write_cr2 = xen_write_cr2,  	.read_cr3 = xen_read_cr3, -	.write_cr3 = xen_write_cr3, +	.write_cr3 = xen_write_cr3_init,  	.flush_tlb_user = xen_flush_tlb,  	.flush_tlb_kernel = xen_flush_tlb, @@ -2347,6 +2210,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {  	.lazy_mode = {  		.enter = paravirt_enter_lazy_mmu,  		.leave = xen_leave_lazy_mmu, +		.flush = paravirt_flush_lazy_mmu,  	},  	.set_fixmap = xen_set_fixmap, @@ -2354,11 +2218,17 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {  void __init xen_init_mmu_ops(void)  { -	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; -	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; -	pv_mmu_ops = xen_mmu_ops; +	x86_init.paging.pagetable_init = xen_pagetable_init; -	vmap_lazy_unmap = false; +	/* Optimization - we can use the HVM one but it has no idea which +	 * VCPUs are descheduled - which means that it will needlessly IPI +	 * them. Xen knows so let it do the job. 
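xen_flush_tlb_others() itself is defined earlier in mmu.c and is not shown in this hunk. The comment above refers to the hypercall-based remote flush it implements; the following is a from-memory sketch of that shape, assuming the MMUEXT_TLB_FLUSH_MULTI/MMUEXT_INVLPG_MULTI interface and the xen_mc_* batching helpers used throughout this file. The in-tree body adds tracing and its signature varies across kernel versions:

	static void xen_flush_tlb_others(const struct cpumask *cpus,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
	{
		struct {
			struct mmuext_op op;
			DECLARE_BITMAP(mask, NR_CPUS);
		} *args;
		struct multicall_space mcs;

		if (cpumask_empty(cpus))
			return;		/* nothing to do */

		mcs = xen_mc_entry(sizeof(*args));
		args = mcs.args;
		args->op.arg2.vcpumask = to_cpumask(args->mask);

		/* Flush everyone in 'cpus' that is online, except ourselves;
		 * Xen skips vCPUs it knows are not currently running. */
		cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
		if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
			/* Single-page flush: INVLPG on the remote vCPUs. */
			args->op.cmd = MMUEXT_INVLPG_MULTI;
			args->op.arg1.linear_addr = start;
		}

		MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
		xen_mc_issue(PARAVIRT_LAZY_MMU);
	}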
+	 */ +	if (xen_feature(XENFEAT_auto_translated_physmap)) { +		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others; +		return; +	} +	pv_mmu_ops = xen_mmu_ops;  	memset(dummy_mapping, 0xff, PAGE_SIZE);  } @@ -2383,7 +2253,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,  			in_frames[i] = virt_to_mfn(vaddr);  		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); -		set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); +		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);  		if (out_frames)  			out_frames[i] = virt_to_pfn(vaddr); @@ -2479,12 +2349,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,  	return success;  } -int xen_create_contiguous_region(unsigned long vstart, unsigned int order, -				 unsigned int address_bits) +int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, +				 unsigned int address_bits, +				 dma_addr_t *dma_handle)  {  	unsigned long *in_frames = discontig_frames, out_frame;  	unsigned long  flags;  	int            success; +	unsigned long vstart = (unsigned long)phys_to_virt(pstart);  	/*  	 * Currently an auto-translated guest will not perform I/O, nor will @@ -2519,15 +2391,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,  	spin_unlock_irqrestore(&xen_reservation_lock, flags); +	*dma_handle = virt_to_machine(vstart).maddr;  	return success ? 0 : -ENOMEM;  }  EXPORT_SYMBOL_GPL(xen_create_contiguous_region); -void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) +void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)  {  	unsigned long *out_frames = discontig_frames, in_frame;  	unsigned long  flags;  	int success; +	unsigned long vstart;  	if (xen_feature(XENFEAT_auto_translated_physmap))  		return; @@ -2535,6 +2409,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)  	if (unlikely(order > MAX_CONTIG_ORDER))  		return; +	vstart = (unsigned long)phys_to_virt(pstart);  	memset((void *) vstart, 0, PAGE_SIZE << order);  	spin_lock_irqsave(&xen_reservation_lock, flags); @@ -2560,6 +2435,43 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)  EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);  #ifdef CONFIG_XEN_PVHVM +#ifdef CONFIG_PROC_VMCORE +/* + * This function is used in two contexts: + * - the kdump kernel has to check whether a pfn of the crashed kernel + *   was a ballooned page. vmcore is using this function to decide + *   whether to access a pfn of the crashed kernel. + * - the kexec kernel has to check whether a pfn was ballooned by the + *   previous kernel. If the pfn is ballooned, handle it properly. + * Returns 0 if the pfn is not backed by a RAM page, the caller may + * handle the pfn special in this case. 
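Below, register_oldmem_pfn_is_ram() wires the Xen callback into fs/proc/vmcore.c. For context, the consumer side looks roughly like the following; this is a from-memory sketch, not part of this patch, and the real vmcore code carries additional locking and error handling:

	/* Sketch of the vmcore side of the hook registered below. */
	static int (*oldmem_pfn_is_ram)(unsigned long pfn);

	int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
	{
		if (oldmem_pfn_is_ram)
			return -EBUSY;
		oldmem_pfn_is_ram = fn;
		return 0;
	}

	static int pfn_is_ram(unsigned long pfn)
	{
		int ret = 1;	/* default: treat the pfn as RAM */

		if (oldmem_pfn_is_ram)
			ret = oldmem_pfn_is_ram(pfn);
		return ret;	/* the reader zero-fills when this is 0 */
	}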
+ */ +static int xen_oldmem_pfn_is_ram(unsigned long pfn) +{ +	struct xen_hvm_get_mem_type a = { +		.domid = DOMID_SELF, +		.pfn = pfn, +	}; +	int ram; + +	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) +		return -ENXIO; + +	switch (a.mem_type) { +		case HVMMEM_mmio_dm: +			ram = 0; +			break; +		case HVMMEM_ram_rw: +		case HVMMEM_ram_ro: +		default: +			ram = 1; +			break; +	} + +	return ram; +} +#endif +  static void xen_hvm_exit_mmap(struct mm_struct *mm)  {  	struct xen_hvm_pagetable_dying a; @@ -2590,6 +2502,98 @@ void __init xen_hvm_init_mmu_ops(void)  {  	if (is_pagetable_dying_supported())  		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; +#ifdef CONFIG_PROC_VMCORE +	register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram); +#endif +} +#endif + +#ifdef CONFIG_XEN_PVH +/* + * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user + * space creating new guest on pvh dom0 and needing to map domU pages. + */ +static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn, +			    unsigned int domid) +{ +	int rc, err = 0; +	xen_pfn_t gpfn = lpfn; +	xen_ulong_t idx = fgfn; + +	struct xen_add_to_physmap_range xatp = { +		.domid = DOMID_SELF, +		.foreign_domid = domid, +		.size = 1, +		.space = XENMAPSPACE_gmfn_foreign, +	}; +	set_xen_guest_handle(xatp.idxs, &idx); +	set_xen_guest_handle(xatp.gpfns, &gpfn); +	set_xen_guest_handle(xatp.errs, &err); + +	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); +	if (rc < 0) +		return rc; +	return err; +} + +static int xlate_remove_from_p2m(unsigned long spfn, int count) +{ +	struct xen_remove_from_physmap xrp; +	int i, rc; + +	for (i = 0; i < count; i++) { +		xrp.domid = DOMID_SELF; +		xrp.gpfn = spfn+i; +		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); +		if (rc) +			break; +	} +	return rc; +} + +struct xlate_remap_data { +	unsigned long fgfn; /* foreign domain's gfn */ +	pgprot_t prot; +	domid_t  domid; +	int index; +	struct page **pages; +}; + +static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, +			    void *data) +{ +	int rc; +	struct xlate_remap_data *remap = data; +	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]); +	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot)); + +	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid); +	if (rc) +		return rc; +	native_set_pte(ptep, pteval); + +	return 0; +} + +static int xlate_remap_gfn_range(struct vm_area_struct *vma, +				 unsigned long addr, unsigned long mfn, +				 int nr, pgprot_t prot, unsigned domid, +				 struct page **pages) +{ +	int err; +	struct xlate_remap_data pvhdata; + +	BUG_ON(!pages); + +	pvhdata.fgfn = mfn; +	pvhdata.prot = prot; +	pvhdata.domid = domid; +	pvhdata.index = 0; +	pvhdata.pages = pages; +	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT, +				  xlate_map_pte_fn, &pvhdata); +	flush_tlb_all(); +	return err;  }  #endif @@ -2605,9 +2609,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,  				 unsigned long addr, void *data)  {  	struct remap_data *rmd = data; -	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); +	pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot)); -	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; +	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;  	rmd->mmu_update->val = pte_val_ma(pte);  	rmd->mmu_update++; @@ -2616,8 +2620,10 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,  int xen_remap_domain_mfn_range(struct vm_area_struct *vma,  			       unsigned long addr, -			       unsigned long mfn, int nr, -			     
  pgprot_t prot, unsigned domid) +			       xen_pfn_t mfn, int nr, +			       pgprot_t prot, unsigned domid, +			       struct page **pages) +  {  	struct remap_data rmd;  	struct mmu_update mmu_update[REMAP_BATCH_SIZE]; @@ -2625,9 +2631,17 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,  	unsigned long range;  	int err = 0; -	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); +	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); -	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; +	if (xen_feature(XENFEAT_auto_translated_physmap)) { +#ifdef CONFIG_XEN_PVH +		/* We need to update the local page tables and the xen HAP */ +		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot, +					     domid, pages); +#else +		return -EINVAL; +#endif +        }  	rmd.mfn = mfn;  	rmd.prot = prot; @@ -2642,8 +2656,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,  		if (err)  			goto out; -		err = -EFAULT; -		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0) +		err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid); +		if (err < 0)  			goto out;  		nr -= batch; @@ -2653,71 +2667,38 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,  	err = 0;  out: -	flush_tlb_all(); +	xen_flush_tlb_all();  	return err;  }  EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); -#ifdef CONFIG_XEN_DEBUG_FS - -static struct dentry *d_mmu_debug; - -static int __init xen_mmu_debugfs(void) +/* Returns: 0 success */ +int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, +			       int numpgs, struct page **pages)  { -	struct dentry *d_xen = xen_init_debugfs(); - -	if (d_xen == NULL) -		return -ENOMEM; +	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) +		return 0; -	d_mmu_debug = debugfs_create_dir("mmu", d_xen); - -	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats); - -	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update); -	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug, -			   &mmu_stats.pgd_update_pinned); -	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug, -			   &mmu_stats.pgd_update_pinned); - -	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update); -	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug, -			   &mmu_stats.pud_update_pinned); -	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug, -			   &mmu_stats.pud_update_pinned); - -	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update); -	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug, -			   &mmu_stats.pmd_update_pinned); -	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug, -			   &mmu_stats.pmd_update_pinned); - -	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update); -//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug, -//			   &mmu_stats.pte_update_pinned); -	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug, -			   &mmu_stats.pte_update_pinned); - -	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update); -	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug, -			   &mmu_stats.mmu_update_extended); -	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, -				     mmu_stats.mmu_update_histo, 20); - -	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at); -	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug, -			   &mmu_stats.set_pte_at_batched); -	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug, -			   
&mmu_stats.set_pte_at_current);
-	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
-			   &mmu_stats.set_pte_at_kernel);
-
-	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
-	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
-			   &mmu_stats.prot_commit_batched);
+#ifdef CONFIG_XEN_PVH
+	while (numpgs--) {
+		/*
+		 * The mmu has already cleaned up the process mmu
+		 * resources at this point (lookup_address will return
+		 * NULL).
+		 */
+		unsigned long pfn = page_to_pfn(pages[numpgs]);
+		xlate_remove_from_p2m(pfn, 1);
+	}
+	/*
+	 * We don't need to flush TLBs here: as part of
+	 * xlate_remove_from_p2m(), the hypervisor flushes them itself
+	 * after removing the p2m entries from the EPT/NPT.
+	 */
 	return 0;
+#else
+	return -EINVAL;
+#endif
 }
-fs_initcall(xen_mmu_debugfs);
-
-#endif	/* CONFIG_XEN_DEBUG_FS */
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
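For context, the typical in-tree consumer of xen_remap_domain_mfn_range() and xen_unmap_domain_mfn_range() is drivers/xen/privcmd.c, which maps foreign frames into a user VMA a batch at a time. The helper below is illustrative only (the name and error handling are not the real privcmd code); it is grounded in the signature shown in this diff:

	/* Illustrative only: a minimal caller modelled on privcmd; the real
	 * driver tracks per-frame errors and batches many frames at once. */
	static int map_one_foreign_frame(struct vm_area_struct *vma,
					 unsigned long va, xen_pfn_t mfn,
					 domid_t domid, struct page **pages)
	{
		/* xen_remap_domain_mfn_range() insists on VM_IO | VM_PFNMAP
		 * being set on the vma (see the BUG_ON above). */
		if ((vma->vm_flags & (VM_PFNMAP | VM_IO)) != (VM_PFNMAP | VM_IO))
			return -EINVAL;

		return xen_remap_domain_mfn_range(vma, va & PAGE_MASK, mfn, 1,
						  vma->vm_page_prot, domid,
						  pages);
	}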

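The reworked xen_create_contiguous_region() seen earlier in this diff now takes a physical start address and returns the machine (bus) address through *dma_handle; its main in-tree user is the Xen SWIOTLB DMA code. A simplified sketch of that call pattern follows; the function name and the fixed 32-bit address width are illustrative assumptions, not the exact swiotlb-xen code:

	/* Illustrative only: allocate an order-N block and exchange its
	 * frames for machine-contiguous ones, as a DMA allocator would. */
	static void *alloc_machine_contiguous(size_t size, dma_addr_t *dma_handle)
	{
		unsigned int order = get_order(size);
		void *vaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       order);

		if (!vaddr)
			return NULL;

		/* 32 address bits here stands in for the device's DMA mask. */
		if (xen_create_contiguous_region(virt_to_phys(vaddr), order,
						 32, dma_handle)) {
			free_pages((unsigned long)vaddr, order);
			return NULL;
		}
		return vaddr;	/* *dma_handle holds the machine address */
	}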