Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c | 465
1 file changed, 308 insertions(+), 157 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17eeb59b..6e3ba8d112a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,16 +22,20 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
+#include <asm/fixmap.h>
 
 #include "mm.h"
 #include "tcm.h"
@@ -114,28 +118,54 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 #ifdef CONFIG_CPU_CP15
+static unsigned long initial_pmd_value __initdata = 0;
+
 /*
- * These are useful for identifying cache coherency
- * problems by allowing the cache or the cache and
- * writebuffer to be turned off.  (Note: the write
- * buffer should not be on and the cache off).
+ * Initialise the cache_policy variable with the initial state specified
+ * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
+ * the C code sets the page tables up with the same policy as the head
+ * assembly code, which avoids an illegal state where the TLBs can get
+ * confused.  See comments in early_cachepolicy() for more information.
 */
-static int __init early_cachepolicy(char *p)
+void __init init_default_cache_policy(unsigned long pmd)
 {
 	int i;
 
+	initial_pmd_value = pmd;
+
+	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+
+	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
+		if (cache_policies[i].pmd == pmd) {
+			cachepolicy = i;
+			break;
+		}
+
+	if (i == ARRAY_SIZE(cache_policies))
+		pr_err("ERROR: could not find cache policy\n");
+}
+
+/*
+ * These are useful for identifying cache coherency problems by allowing
+ * the cache or the cache and writebuffer to be turned off.  (Note: the
+ * write buffer should not be on and the cache off).
+ */
+static int __init early_cachepolicy(char *p)
+{
+	int i, selected = -1;
+
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
 		if (memcmp(p, cache_policies[i].policy, len) == 0) {
-			cachepolicy = i;
-			cr_alignment &= ~cache_policies[i].cr_mask;
-			cr_no_alignment &= ~cache_policies[i].cr_mask;
+			selected = i;
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(cache_policies))
-		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+
+	if (selected == -1)
+		pr_err("ERROR: unknown or unsupported cache policy\n");
+
 	/*
 	 * This restriction is partly to do with the way we boot; it is
 	 * unpredictable to have memory mapped using two different sets of
@@ -143,12 +173,18 @@ static int __init early_cachepolicy(char *p)
 	 * change these attributes once the initial assembly has setup the
 	 * page tables.
 	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
-		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
-		cachepolicy = CPOLICY_WRITEBACK;
+	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
+		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
+			cache_policies[cachepolicy].policy);
+		return 0;
+	}
+
+	if (selected != cachepolicy) {
+		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
+		cachepolicy = selected;
+		flush_cache_all();
+		set_cr(cr);
 	}
-	flush_cache_all();
-	set_cr(cr_alignment);
 	return 0;
 }
 early_param("cachepolicy", early_cachepolicy);
@@ -183,35 +219,6 @@ static int __init early_ecc(char *p)
 early_param("ecc", early_ecc);
 #endif
 
-static int __init noalign_setup(char *__unused)
-{
-	cr_alignment &= ~CR_A;
-	cr_no_alignment &= ~CR_A;
-	set_cr(cr_alignment);
-	return 1;
-}
-__setup("noalign", noalign_setup);
-
-#ifndef CONFIG_SMP
-void adjust_cr(unsigned long mask, unsigned long set)
-{
-	unsigned long flags;
-
-	mask &= ~CR_A;
-
-	set &= mask;
-
-	local_irq_save(flags);
-
-	cr_no_alignment = (cr_no_alignment & ~mask) | set;
-	cr_alignment = (cr_alignment & ~mask) | set;
-
-	set_cr((get_cr() & ~mask) | set);
-
-	local_irq_restore(flags);
-}
-#endif
-
 #else /* ifdef CONFIG_CPU_CP15 */
 
 static int __init early_cachepolicy(char *p)
@@ -229,12 +236,16 @@ __setup("noalign", noalign_setup);
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
+		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
+				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
@@ -285,36 +296,43 @@ static struct mem_type mem_types[] = {
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_RWX_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_DTCM] = {
+	[MT_MEMORY_RW_DTCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_ITCM] = {
+	[MT_MEMORY_RWX_ITCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_SO] = {
+	[MT_MEMORY_RW_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
@@ -323,7 +341,8 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DMA_READY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -335,6 +354,44 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+			void *data) \
+{ \
+	pte_t pte = pteop(*ptep); \
+\
+	set_pte_ext(ptep, pte, 0); \
+	return 0; \
+} \
+
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+	unsigned long start = addr; \
+	unsigned long size = PAGE_SIZE*numpages; \
+	unsigned end = start + size; \
+\
+	if (start < MODULES_VADDR || start >= MODULES_END) \
+		return -EINVAL;\
+\
+	if (end < MODULES_VADDR || end >= MODULES_END) \
+		return -EINVAL; \
+\
+	apply_to_page_range(&init_mm, start, size, callback, NULL); \
+	flush_tlb_kernel_range(start, end); \
+	return 0;\
+}
+
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -361,8 +418,17 @@ static void __init build_mem_type_table(void)
 			cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-	if (is_smp())
-		cachepolicy = CPOLICY_WRITEALLOC;
+
+	if (is_smp()) {
+		if (cachepolicy != CPOLICY_WRITEALLOC) {
+			pr_warn("Forcing write-allocate cache policy for SMP\n");
+			cachepolicy = CPOLICY_WRITEALLOC;
+		}
+		if (!(initial_pmd_value & PMD_SECT_S)) {
+			pr_warn("Forcing shared mappings for SMP\n");
+			initial_pmd_value |= PMD_SECT_S;
+		}
+	}
 
 	/*
 	 * Strip out features not present on earlier architectures.
@@ -408,6 +474,9 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+			/* Also setup NX memory mapping */
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 		}
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/*
@@ -456,7 +525,18 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+
+	/*
+	 * We don't use domains on ARMv6 (since this causes problems with
+	 * v6/v7 kernels), so we must use a separate memory type for user
+	 * r/o, kernel r/w to map the vectors page.
+	 */
+#ifndef CONFIG_ARM_LPAE
+	if (cpu_arch == CPU_ARCH_ARMv6)
+		vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -472,11 +552,12 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-		if (is_smp()) {
-			/*
-			 * Mark memory with the "shared" attribute
-			 * for SMP systems
-			 */
+		/*
+		 * If the initial page tables were created with the S bit
+		 * set, then we need to do the same here for the same
+		 * reasons given in early_cachepolicy().
+		 */
+		if (initial_pmd_value & PMD_SECT_S) {
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
@@ -485,11 +566,13 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
 	}
 
@@ -500,15 +583,15 @@ static void __init build_mem_type_table(void)
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
 #ifdef CONFIG_ARM_LPAE
@@ -541,10 +624,12 @@ static void __init build_mem_type_table(void)
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -556,8 +641,8 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
 		break;
 	}
-	printk("Memory policy: ECC %sabled, Data cache %s\n",
-		ecc_mask ? "en" : "dis", cp->policy);
+	pr_info("Memory policy: %sData cache %s\n",
+		ecc_mask ? "ECC enabled, " : "", cp->policy);
 
 	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 		struct mem_type *t = &mem_types[i];
@@ -990,74 +1075,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
 	phys_addr_t memblock_limit = 0;
-	int i, j, highmem = 0;
+	int highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+	struct memblock_region *reg;
 
-	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
-		struct membank *bank = &meminfo.bank[j];
-		phys_addr_t size_limit;
-
-		*bank = meminfo.bank[i];
-		size_limit = bank->size;
+	for_each_memblock(memory, reg) {
+		phys_addr_t block_start = reg->base;
+		phys_addr_t block_end = reg->base + reg->size;
+		phys_addr_t size_limit = reg->size;
 
-		if (bank->start >= vmalloc_limit)
+		if (reg->base >= vmalloc_limit)
 			highmem = 1;
 		else
-			size_limit = vmalloc_limit - bank->start;
+			size_limit = vmalloc_limit - reg->base;
 
-		bank->highmem = highmem;
-#ifdef CONFIG_HIGHMEM
-		/*
-		 * Split those memory banks which are partially overlapping
-		 * the vmalloc area greatly simplifying things later.
-		 */
-		if (!highmem && bank->size > size_limit) {
-			if (meminfo.nr_banks >= NR_BANKS) {
-				printk(KERN_CRIT "NR_BANKS too low, "
-						 "ignoring high memory\n");
-			} else {
-				memmove(bank + 1, bank,
-					(meminfo.nr_banks - i) * sizeof(*bank));
-				meminfo.nr_banks++;
-				i++;
-				bank[1].size -= size_limit;
-				bank[1].start = vmalloc_limit;
-				bank[1].highmem = highmem = 1;
-				j++;
+		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+
+			if (highmem) {
+				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
+					&block_start, &block_end);
+				memblock_remove(reg->base, reg->size);
+				continue;
 			}
-			bank->size = size_limit;
-		}
-#else
-		/*
-		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
-		 */
-		if (highmem) {
-			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
-			       "(!CONFIG_HIGHMEM).\n",
-			       (unsigned long long)bank->start,
-			       (unsigned long long)bank->start + bank->size - 1);
-			continue;
-		}
-		/*
-		 * Check whether this memory bank would partially overlap
-		 * the vmalloc area.
-		 */
-		if (bank->size > size_limit) {
-			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
-			       "to -%.8llx (vmalloc region overlap).\n",
-			       (unsigned long long)bank->start,
-			       (unsigned long long)bank->start + bank->size - 1,
-			       (unsigned long long)bank->start + size_limit - 1);
-			bank->size = size_limit;
+			if (reg->size > size_limit) {
+				phys_addr_t overlap_size = reg->size - size_limit;
+
+				pr_notice("Truncating RAM at %pa-%pa to -%pa",
				      &block_start, &block_end, &vmalloc_limit);
+				memblock_remove(vmalloc_limit, overlap_size);
+				block_end = vmalloc_limit;
+			}
 		}
-#endif
 
-		if (!bank->highmem) {
-			phys_addr_t bank_end = bank->start + bank->size;
-			if (bank_end > arm_lowmem_limit)
-				arm_lowmem_limit = bank_end;
+		if (!highmem) {
+			if (block_end > arm_lowmem_limit) {
+				if (reg->size > size_limit)
+					arm_lowmem_limit = vmalloc_limit;
+				else
+					arm_lowmem_limit = block_end;
+			}
 
 			/*
 			 * Find the first non-section-aligned page, and point
@@ -1073,35 +1131,15 @@ void __init sanity_check_meminfo(void)
 			 * occurs before any free memory is mapped.
 			 */
 			if (!memblock_limit) {
-				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
-					memblock_limit = bank->start;
-				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
-					memblock_limit = bank_end;
+				if (!IS_ALIGNED(block_start, SECTION_SIZE))
+					memblock_limit = block_start;
+				else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+					memblock_limit = arm_lowmem_limit;
 			}
-		}
-		j++;
-	}
-#ifdef CONFIG_HIGHMEM
-	if (highmem) {
-		const char *reason = NULL;
-		if (cache_is_vipt_aliasing()) {
-			/*
-			 * Interactions between kmap and other mappings
-			 * make highmem support with aliasing VIPT caches
-			 * rather difficult.
-			 */
-			reason = "with VIPT aliasing cache";
-		}
-		if (reason) {
-			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
-				reason);
-			while (j > 0 && meminfo.bank[j - 1].highmem)
-				j--;
 		}
 	}
-#endif
-	meminfo.nr_banks = j;
+
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
@@ -1288,12 +1326,17 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
+
+	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
+		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
 }
 
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
@@ -1306,15 +1349,123 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		map.pfn = __phys_to_pfn(start);
-		map.virtual = __phys_to_virt(start);
-		map.length = end - start;
-		map.type = MT_MEMORY;
+		if (end < kernel_x_start || start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RWX;
 
-		create_mapping(&map);
+			create_mapping(&map);
+		} else {
+			/* This better cover the entire kernel */
+			if (start < kernel_x_start) {
+				map.pfn = __phys_to_pfn(start);
+				map.virtual = __phys_to_virt(start);
+				map.length = kernel_x_start - start;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(kernel_x_start);
+			map.virtual = __phys_to_virt(kernel_x_start);
+			map.length = kernel_x_end - kernel_x_start;
+			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+
+			if (kernel_x_end < end) {
+				map.pfn = __phys_to_pfn(kernel_x_end);
+				map.virtual = __phys_to_virt(kernel_x_end);
+				map.length = end - kernel_x_end;
+				map.type = MT_MEMORY_RW;
+
+				create_mapping(&map);
+			}
+		}
 	}
 }
 
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+			      struct proc_info_list *procinfo)
+{
+	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+	unsigned long map_start, map_end;
+	pgd_t *pgd0, *pgdk;
+	pud_t *pud0, *pudk, *pud_start;
+	pmd_t *pmd0, *pmdk;
+	phys_addr_t phys;
+	int i;
+
+	if (!(mdesc->init_meminfo))
+		return;
+
+	/* remap kernel code and data */
+	map_start = init_mm.start_code & PMD_MASK;
+	map_end   = ALIGN(init_mm.brk, PMD_SIZE);
+
+	/* get a handle on things... */
+	pgd0 = pgd_offset_k(0);
+	pud_start = pud0 = pud_offset(pgd0, 0);
+	pmd0 = pmd_offset(pud0, 0);
+
+	pgdk = pgd_offset_k(map_start);
+	pudk = pud_offset(pgdk, map_start);
+	pmdk = pmd_offset(pudk, map_start);
+
+	mdesc->init_meminfo();
+
+	/* Run the patch stub to update the constants */
+	fixup_pv_table(&__pv_table_begin,
+		(&__pv_table_end - &__pv_table_begin) << 2);
+
+	/*
+	 * Cache cleaning operations for self-modifying code
+	 * We should clean the entries by MVA but running a
+	 * for loop over every pv_table entry pointer would
+	 * just complicate the code.
+	 */
+	flush_cache_louis();
+	dsb(ishst);
+	isb();
+
+	/* remap level 1 table */
+	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+		set_pud(pud0,
+			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+		pmd0 += PTRS_PER_PMD;
+	}
+
+	/* remap pmds for kernel mapping */
+	phys = __pa(map_start);
+	do {
+		*pmdk++ = __pmd(phys | pmdprot);
+		phys += PMD_SIZE;
+	} while (phys < map_end);
+
+	flush_cache_all();
+	cpu_switch_mm(pgd0, &init_mm);
+	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+	local_flush_bp_all();
+	local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+			      struct proc_info_list *procinfo)
+{
+	if (mdesc->init_meminfo)
+		mdesc->init_meminfo();
+}
+
+#endif
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
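Usage note (not part of the commit): the SET_MEMORY_FN()/PTE_SET_FN() macros added above generate set_memory_ro/rw/x/nx() helpers that walk a page range with apply_to_page_range() and then flush the kernel TLB range. They only accept page ranges inside the module area (MODULES_VADDR to MODULES_END) and return -EINVAL otherwise. The sketch below is a minimal, hypothetical caller written against those helpers; the function name and parameters are illustrative only.

/*
 * Illustrative sketch only: mark a module's text pages read-only and its
 * data pages non-executable with the helpers generated by SET_MEMORY_FN().
 * Both ranges are assumed to lie inside [MODULES_VADDR, MODULES_END).
 */
static int example_protect_module(unsigned long text, int text_pages,
				  unsigned long data, int data_pages)
{
	int err;

	err = set_memory_ro(text, text_pages);	/* pte_wrprotect() on each PTE */
	if (err)
		return err;

	return set_memory_nx(data, data_pages);	/* pte_mknexec() on each PTE */
}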
