Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	246
1 file changed, 194 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 36c0c449a89..92cb18d52ea 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -28,6 +28,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
@@ -35,14 +36,55 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include <asm/code-patching.h>
+#include <asm/hugetlb.h>
+#include <asm/paca.h>
 
 #include "mmu_decl.h"
 
-#ifdef CONFIG_PPC_BOOK3E
+/*
+ * This struct lists the sw-supported page sizes.  The hardware MMU may support
+ * other sizes not listed here.  The .ind field is only used on MMUs that have
+ * indirect page table entries.
+ */
+#ifdef CONFIG_PPC_BOOK3E_MMU
+#ifdef CONFIG_PPC_FSL_BOOK3E
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+	[MMU_PAGE_4K] = {
+		.shift	= 12,
+		.enc	= BOOK3E_PAGESZ_4K,
+	},
+	[MMU_PAGE_2M] = {
+		.shift	= 21,
+		.enc	= BOOK3E_PAGESZ_2M,
+	},
+	[MMU_PAGE_4M] = {
+		.shift	= 22,
+		.enc	= BOOK3E_PAGESZ_4M,
+	},
+	[MMU_PAGE_16M] = {
+		.shift	= 24,
+		.enc	= BOOK3E_PAGESZ_16M,
+	},
+	[MMU_PAGE_64M] = {
+		.shift	= 26,
+		.enc	= BOOK3E_PAGESZ_64M,
+	},
+	[MMU_PAGE_256M] = {
+		.shift	= 28,
+		.enc	= BOOK3E_PAGESZ_256M,
+	},
+	[MMU_PAGE_1G] = {
+		.shift	= 30,
+		.enc	= BOOK3E_PAGESZ_1GB,
+	},
+};
+#else
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
@@ -76,6 +118,8 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.enc	= BOOK3E_PAGESZ_1GB,
 	},
 };
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
 static inline int mmu_get_tsize(int psize)
 {
 	return mmu_psize_defs[psize].enc;
@@ -86,7 +130,7 @@ static inline int mmu_get_tsize(int psize)
 	/* This isn't used on !Book3E for now */
 	return 0;
 }
-#endif
+#endif /* CONFIG_PPC_BOOK3E_MMU */
 
 /* The variables below are currently only used on 64-bit Book3E
  * though this will probably be made common with other nohash
@@ -97,11 +141,26 @@ static inline int mmu_get_tsize(int psize)
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
 int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
-int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
+int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
 unsigned long linear_map_top;	/* Top of linear mapping */
+
+/*
+ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
+ * exceptions.  This is used for bolted and e6500 TLB miss handlers which
+ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
+ * this is set to zero.
+ */
+int extlb_level_exc;
+
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
+DEFINE_PER_CPU(int, next_tlbcam_idx);
+EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
+#endif
+
 /*
  * Base TLB flushing operations:
 *
@@ -259,6 +318,11 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	if (vma && is_vm_hugetlb_page(vma))
+		flush_hugetlb_page(vma, vmaddr);
+#endif
+
 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
 			 mmu_get_tsize(mmu_virtual_psize), 0);
 }
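The FSL table added above lists only the software-supported page sizes, keyed by Linux psize index, so mmu_get_tsize() collapses to a table lookup of the hardware TSIZE encoding. A minimal standalone sketch of that pattern follows; the indices and encoding values are illustrative placeholders, not the kernel's real MMU_PAGE_* or BOOK3E_PAGESZ_* constants:

#include <stdio.h>

/* Illustrative stand-ins; the real indices and TSIZE encodings
 * come from the powerpc headers, not from this sketch.
 */
enum { MMU_PAGE_4K, MMU_PAGE_16M, MMU_PAGE_COUNT };

struct mmu_psize_def {
	int shift;	/* log2 of the page size */
	int enc;	/* hardware TSIZE encoding */
};

static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]  = { .shift = 12, .enc = 2 },	/* placeholder enc */
	[MMU_PAGE_16M] = { .shift = 24, .enc = 7 },	/* placeholder enc */
};

/* Same shape as the patch's mmu_get_tsize(): psize index -> TSIZE */
static int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}

int main(void)
{
	printf("16M pages use TSIZE %d\n", mmu_get_tsize(MMU_PAGE_16M));
	return 0;
}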
@@ -266,6 +330,17 @@
 EXPORT_SYMBOL(flush_tlb_page);
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_47x
+void __init early_init_mmu_47x(void)
+{
+#ifdef CONFIG_SMP
+	unsigned long root = of_get_flat_dt_root();
+	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
+		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_PPC_47x */
+
 /*
  * Flush kernel TLB entries in the given range
  */
@@ -299,9 +374,6 @@ EXPORT_SYMBOL(flush_tlb_range);
 void tlb_flush(struct mmu_gather *tlb)
 {
 	flush_tlb_mm(tlb->mm);
-
-	/* Push out batch of freed page tables */
-	pte_free_finish();
 }
 
 /*
@@ -319,7 +391,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
 {
 	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
 
-	if (book3e_htw_enabled) {
+	if (book3e_htw_mode != PPC_HTW_NONE) {
 		unsigned long start = address & PMD_MASK;
 		unsigned long end = address + PMD_SIZE;
 		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
@@ -356,9 +428,9 @@ static void setup_page_sizes(void)
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
 
-	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
-		(mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
+	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
 		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
 		unsigned int min_pg, max_pg;
 
@@ -372,7 +444,7 @@ static void setup_page_sizes(void)
 			def = &mmu_psize_defs[psize];
 			shift = def->shift;
 
-			if (shift == 0)
+			if (shift == 0 || shift & 1)
 				continue;
 
 			/* adjust to be in terms of 4^shift Kb */
@@ -382,7 +454,40 @@ static void setup_page_sizes(void)
 				def->flags |= MMU_PAGE_SIZE_DIRECT;
 		}
 
-		goto no_indirect;
+		goto out;
+	}
+
+	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+		u32 tlb1cfg, tlb1ps;
+
+		tlb0cfg = mfspr(SPRN_TLB0CFG);
+		tlb1cfg = mfspr(SPRN_TLB1CFG);
+		tlb1ps = mfspr(SPRN_TLB1PS);
+		eptcfg = mfspr(SPRN_EPTCFG);
+
+		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+			book3e_htw_mode = PPC_HTW_E6500;
+
+		/*
+		 * We expect 4K subpage size and unrestricted indirect size.
+		 * The lack of a restriction on indirect size is a Freescale
+		 * extension, indicated by PSn = 0 but SPSn != 0.
+		 */
+		if (eptcfg != 2)
+			book3e_htw_mode = PPC_HTW_NONE;
+
+		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+			if (tlb1ps & (1U << (def->shift - 10))) {
+				def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+				if (book3e_htw_mode && psize == MMU_PAGE_2M)
+					def->flags |= MMU_PAGE_SIZE_INDIRECT;
+			}
+		}
+
+		goto out;
 	}
 #endif
@@ -399,8 +504,11 @@ static void setup_page_sizes(void)
 	}
 
 	/* Indirect page sizes supported ? */
-	if ((tlb0cfg & TLBnCFG_IND) == 0)
-		goto no_indirect;
+	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
+	    (tlb0cfg & TLBnCFG_PT) == 0)
+		goto out;
+
+	book3e_htw_mode = PPC_HTW_IBM;
 
 	/* Now, we only deal with one IND page size for each
 	 * direct size. Hopefully all implementations today are
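In the new MAV 2.0 branch above, SPRN_TLB1PS advertises each supported direct page size as bit (shift - 10), so a 4K page (shift 12) is bit 2 and a 1G page (shift 30) is bit 20. A self-contained sketch of that decoding loop, using a made-up TLB1PS read-back value:

#include <stdio.h>

/* Candidate page shifts, as in the patch's mmu_psize_defs table */
static const unsigned int shifts[] = { 12, 21, 22, 24, 26, 28, 30 };

int main(void)
{
	/* Pretend TLB1PS came back with 4K, 2M and 1G supported:
	 * bit (shift - 10) per size, i.e. bits 2, 11 and 20.
	 */
	unsigned int tlb1ps = (1U << 2) | (1U << 11) | (1U << 20);
	unsigned int i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); ++i)
		if (tlb1ps & (1U << (shifts[i] - 10)))
			printf("direct page size: %u KB\n",
			       1U << (shifts[i] - 10));
	return 0;
}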
@@ -425,8 +533,8 @@ static void setup_page_sizes(void)
 				def->ind = ps + 10;
 		}
 	}
-no_indirect:
+out:
 
 	/* Cleanup array and print summary */
 	pr_info("MMU: Supported page sizes\n");
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -448,36 +556,26 @@ static void setup_page_sizes(void)
 
 static void setup_mmu_htw(void)
 {
-	extern unsigned int interrupt_base_book3e;
-	extern unsigned int exc_data_tlb_miss_htw_book3e;
-	extern unsigned int exc_instruction_tlb_miss_htw_book3e;
-
-	unsigned int *ibase = &interrupt_base_book3e;
+	/*
+	 * If we want to use HW tablewalk, enable it by patching the TLB miss
+	 * handlers to branch to the one dedicated to it.
+	 */
 
-	/* Check if HW tablewalk is present, and if yes, enable it by:
-	 *
-	 * - patching the TLB miss handlers to branch to the
-	 *   one dedicates to it
-	 *
-	 * - setting the global book3e_htw_enabled
-	 */
-	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
-
-	if ((tlb0cfg & TLBnCFG_IND) &&
-	    (tlb0cfg & TLBnCFG_PT)) {
-		/* Our exceptions vectors start with a NOP and -then- a branch
-		 * to deal with single stepping from userspace which stops on
-		 * the second instruction. Thus we need to patch the second
-		 * instruction of the exception, not the first one
-		 */
-		patch_branch(ibase + (0x1c0 / 4) + 1,
-			     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
-		patch_branch(ibase + (0x1e0 / 4) + 1,
-			     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
-		book3e_htw_enabled = 1;
+	switch (book3e_htw_mode) {
+	case PPC_HTW_IBM:
+		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
+		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
+		break;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	case PPC_HTW_E6500:
+		extlb_level_exc = EX_TLB_SIZE;
+		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+		break;
+#endif
 	}
-	pr_info("MMU: Book3E Page Tables %s\n",
-		book3e_htw_enabled ? "Enabled" : "Disabled");
+	pr_info("MMU: Book3E HW tablewalk %s\n",
+		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
 }
 
 /*
@@ -498,8 +596,13 @@ static void __early_init_mmu(int boot_cpu)
 	/* XXX This should be decided at runtime based on supported
 	 * page sizes in the TLB, but for now let's assume 16M is
 	 * always there and a good fit (which it probably is)
+	 *
+	 * Freescale booke only supports 4K pages in TLB0, so use that.
 	 */
-	mmu_vmemmap_psize = MMU_PAGE_16M;
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_16M;
 
 	/* XXX This code only checks for TLB 0 capabilities and doesn't
 	 *     check what page size combos are supported by the HW. It
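Both the IBM and e6500 paths gate hardware tablewalk on the same pair of capability bits, TLBnCFG_IND (indirect entries) and TLBnCFG_PT (page-table walk), with the e6500 variant additionally requiring EPTCFG == 2. A condensed sketch of that mode selection; the PPC_HTW_* values and register bit masks here are stand-ins, since the patch shows only the names:

#include <stdio.h>

/* Stand-in values: the real PPC_HTW_* constants and TLBnCFG bit
 * positions live in the kernel headers, not in this patch.
 */
enum { PPC_HTW_NONE, PPC_HTW_IBM, PPC_HTW_E6500 };
#define TLBnCFG_IND	0x00020000
#define TLBnCFG_PT	0x00040000

static int pick_htw_mode(unsigned int tlb0cfg, unsigned int tlb1cfg,
			 unsigned int eptcfg, int fsl_mav2)
{
	if (fsl_mav2) {
		/* e6500-style HTW: TLB1 indirect + TLB0 page-table walk,
		 * and EPTCFG must describe 4K subpages (== 2 in the patch).
		 */
		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT) &&
		    eptcfg == 2)
			return PPC_HTW_E6500;
		return PPC_HTW_NONE;
	}

	/* IBM-style HTW: both capabilities advertised by TLB0 */
	if ((tlb0cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
		return PPC_HTW_IBM;

	return PPC_HTW_NONE;
}

int main(void)
{
	printf("mode = %d\n",
	       pick_htw_mode(TLBnCFG_PT, TLBnCFG_IND, 2, 1)); /* -> E6500 */
	return 0;
}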
@@ -517,8 +620,16 @@ static void __early_init_mmu(int boot_cpu)
 
 	/* Set MAS4 based on page table setting */
 	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
-	if (book3e_htw_enabled) {
-		mas4 |= mas4 | MAS4_INDD;
+	switch (book3e_htw_mode) {
+	case PPC_HTW_E6500:
+		mas4 |= MAS4_INDD;
+		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+		mas4 |= MAS4_TLBSELD(1);
+		mmu_pte_psize = MMU_PAGE_2M;
+		break;
+
+	case PPC_HTW_IBM:
+		mas4 |= MAS4_INDD;
 #ifdef CONFIG_PPC_64K_PAGES
 		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
 		mmu_pte_psize = MMU_PAGE_256M;
@@ -526,13 +637,16 @@ static void __early_init_mmu(int boot_cpu)
 		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
 		mmu_pte_psize = MMU_PAGE_1M;
 #endif
-	} else {
+		break;
+
+	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
 		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
 #else
 		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
 #endif
 		mmu_pte_psize = mmu_virtual_psize;
+		break;
 	}
 
 	mtspr(SPRN_MAS4, mas4);
@@ -551,7 +665,13 @@ static void __early_init_mmu(int boot_cpu)
 
 		/* limit memory so we dont have linear faults */
 		memblock_enforce_memory_limit(linear_map_top);
-		memblock_analyze();
+
+		if (book3e_htw_mode == PPC_HTW_NONE) {
+			extlb_level_exc = EX_TLB_SIZE;
+			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+			patch_exception(0x1e0,
+				exc_instruction_tlb_miss_bolted_book3e);
+		}
 	}
 #endif
 
@@ -568,7 +688,7 @@ void __init early_init_mmu(void)
 	__early_init_mmu(1);
 }
 
-void __cpuinit early_init_mmu_secondary(void)
+void early_init_mmu_secondary(void)
 {
 	__early_init_mmu(0);
 }
@@ -576,15 +696,37 @@ void __cpuinit early_init_mmu_secondary(void)
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
 {
-	/* On Embedded 64-bit, we adjust the RMA size to match
+	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
 	 * the bolted TLB entry. We know for now that only 1G
 	 * entries are supported though that may eventually
-	 * change. We crop it to the size of the first MEMBLOCK to
+	 * change.
+	 *
+	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
+	 * first bolted TLB entry size.  We still limit max to 1G even if
+	 * the TLB could cover more.  This is due to what the early init
+	 * code is set up to do.
+	 *
+	 * We crop it to the size of the first MEMBLOCK to
 	 * avoid going over total available memory just in case...
 	 */
-	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		unsigned long linear_sz;
+		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
+					first_memblock_base);
+		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+	} else
+#endif
+		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
 
 	/* Finally limit subsequent allocations */
-	memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size);
+	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
+#else /* ! CONFIG_PPC64 */
+void __init early_init_mmu(void)
+{
+#ifdef CONFIG_PPC_47x
+	early_init_mmu_47x();
+#endif
 }
 #endif /* CONFIG_PPC64 */
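On the FSL side, setup_initial_memory_limit() now derives the RMA from the size of the first bolted TLBCAM entry (via calc_cam_sz()) instead of the raw memblock size, then caps it at 1G. A rough stand-in for that sizing, assuming the usual e500 constraint that TLBCAM entries come in power-of-4 sizes and must be naturally aligned; the real calc_cam_sz() helper elsewhere in the tree also honours the MMU's maximum TSIZE:

#include <stdio.h>

/* Rough stand-in for calc_cam_sz(): largest power-of-4 block that
 * fits in `ram` and is naturally aligned at both virt and phys.
 */
static unsigned long long cam_sz(unsigned long long ram,
				 unsigned long long virt,
				 unsigned long long phys)
{
	unsigned long long sz = 1ULL << 12;	/* smallest TLBCAM: 4K */

	while ((sz << 2) <= ram &&
	       !((virt | phys) & ((sz << 2) - 1)))
		sz <<= 2;
	return sz;
}

int main(void)
{
	unsigned long long linear_sz = cam_sz(0x80000000ULL,	/* 2G RAM */
					      0xc000000000000000ULL, 0);
	unsigned long long rma = linear_sz < 0x40000000ULL ?
				 linear_sz : 0x40000000ULL;	/* cap at 1G */

	printf("RMA size: %llu MB\n", rma >> 20);	/* -> 1024 MB */
	return 0;
}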
