Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c   595
1 file changed, 401 insertions, 194 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 83f534d862d..88fdd9d2507 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -27,11 +27,13 @@  #include <linux/proc_fs.h>  #include <linux/stat.h>  #include <linux/sysctl.h> +#include <linux/export.h>  #include <linux/ctype.h>  #include <linux/cache.h>  #include <linux/init.h>  #include <linux/signal.h>  #include <linux/memblock.h> +#include <linux/context_tracking.h>  #include <asm/processor.h>  #include <asm/pgtable.h> @@ -39,11 +41,9 @@  #include <asm/mmu_context.h>  #include <asm/page.h>  #include <asm/types.h> -#include <asm/system.h>  #include <asm/uaccess.h>  #include <asm/machdep.h>  #include <asm/prom.h> -#include <asm/abs_addr.h>  #include <asm/tlbflush.h>  #include <asm/io.h>  #include <asm/eeh.h> @@ -53,6 +53,10 @@  #include <asm/sections.h>  #include <asm/spu.h>  #include <asm/udbg.h> +#include <asm/code-patching.h> +#include <asm/fadump.h> +#include <asm/firmware.h> +#include <asm/tm.h>  #ifdef DEBUG  #define DBG(fmt...) udbg_printf(fmt) @@ -104,9 +108,6 @@ int mmu_kernel_ssize = MMU_SEGSIZE_256M;  int mmu_highuser_ssize = MMU_SEGSIZE_256M;  u16 mmu_slb_size = 64;  EXPORT_SYMBOL_GPL(mmu_slb_size); -#ifdef CONFIG_HUGETLB_PAGE -unsigned int HPAGE_SHIFT; -#endif  #ifdef CONFIG_PPC_64K_PAGES  int mmu_ci_restrictions;  #endif @@ -126,7 +127,7 @@ static struct mmu_psize_def mmu_psize_defaults_old[] = {  	[MMU_PAGE_4K] = {  		.shift	= 12,  		.sllp	= 0, -		.penc	= 0, +		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},  		.avpnm	= 0,  		.tlbiel = 0,  	}, @@ -140,14 +141,15 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {  	[MMU_PAGE_4K] = {  		.shift	= 12,  		.sllp	= 0, -		.penc	= 0, +		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},  		.avpnm	= 0,  		.tlbiel = 1,  	},  	[MMU_PAGE_16M] = {  		.shift	= 24,  		.sllp	= SLB_VSID_L, -		.penc	= 0, +		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0, +			    [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },  		.avpnm	= 0x1UL,  		.tlbiel = 0,  	}, @@ -167,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)  	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&  					 (pteflags & _PAGE_DIRTY)))  		rflags |= 1; - -	/* Always add C */ -	return rflags | HPTE_R_C; +	/* +	 * Always add "C" bit for perf. Memory coherence is always enabled +	 */ +	return rflags | HPTE_R_C | HPTE_R_M;  }  int htab_bolt_mapping(unsigned long vstart, unsigned long vend, @@ -192,19 +195,42 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,  	     vaddr += step, paddr += step) {  		unsigned long hash, hpteg;  		unsigned long vsid = get_kernel_vsid(vaddr, ssize); -		unsigned long va = hpt_va(vaddr, vsid, ssize); +		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);  		unsigned long tprot = prot; +		/* +		 * If we hit a bad address return error. +		 */ +		if (!vsid) +			return -1;  		/* Make kernel text executable */  		if (overlaps_kernel_text(vaddr, vaddr + step))  			tprot &= ~HPTE_R_N; -		hash = hpt_hash(va, shift, ssize); +		/* Make kvm guest trampolines executable */ +		if (overlaps_kvm_tmp(vaddr, vaddr + step)) +			tprot &= ~HPTE_R_N; + +		/* +		 * If relocatable, check if it overlaps interrupt vectors that +		 * are copied down to real 0. For relocatable kernel +		 * (e.g. kdump case) we copy interrupt vectors down to real +		 * address 0. Mark that region as executable. 
This is +		 * because on p8 system with relocation on exception feature +		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence +		 * in order to execute the interrupt handlers in virtual +		 * mode the vector region need to be marked as executable. +		 */ +		if ((PHYSICAL_START > MEMORY_START) && +			overlaps_interrupt_vector_text(vaddr, vaddr + step)) +				tprot &= ~HPTE_R_N; + +		hash = hpt_hash(vpn, shift, ssize);  		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);  		BUG_ON(!ppc_md.hpte_insert); -		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, -					 HPTE_V_BOLTED, psize, ssize); +		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, +					 HPTE_V_BOLTED, psize, psize, ssize);  		if (ret < 0)  			break; @@ -243,26 +269,25 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,  					 const char *uname, int depth,  					 void *data)  { -	char *type = of_get_flat_dt_prop(node, "device_type", NULL); -	u32 *prop; -	unsigned long size = 0; +	const char *type = of_get_flat_dt_prop(node, "device_type", NULL); +	const __be32 *prop; +	int size = 0;  	/* We are scanning "cpu" nodes only */  	if (type == NULL || strcmp(type, "cpu") != 0)  		return 0; -	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", -					  &size); +	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);  	if (prop == NULL)  		return 0;  	for (; size >= 4; size -= 4, ++prop) { -		if (prop[0] == 40) { +		if (be32_to_cpu(prop[0]) == 40) {  			DBG("1T segment support detected\n"); -			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT; +			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;  			return 1;  		}  	} -	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B; +	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;  	return 0;  } @@ -271,79 +296,103 @@ static void __init htab_init_seg_sizes(void)  	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);  } +static int __init get_idx_from_shift(unsigned int shift) +{ +	int idx = -1; + +	switch (shift) { +	case 0xc: +		idx = MMU_PAGE_4K; +		break; +	case 0x10: +		idx = MMU_PAGE_64K; +		break; +	case 0x14: +		idx = MMU_PAGE_1M; +		break; +	case 0x18: +		idx = MMU_PAGE_16M; +		break; +	case 0x22: +		idx = MMU_PAGE_16G; +		break; +	} +	return idx; +} +  static int __init htab_dt_scan_page_sizes(unsigned long node,  					  const char *uname, int depth,  					  void *data)  { -	char *type = of_get_flat_dt_prop(node, "device_type", NULL); -	u32 *prop; -	unsigned long size = 0; +	const char *type = of_get_flat_dt_prop(node, "device_type", NULL); +	const __be32 *prop; +	int size = 0;  	/* We are scanning "cpu" nodes only */  	if (type == NULL || strcmp(type, "cpu") != 0)  		return 0; -	prop = (u32 *)of_get_flat_dt_prop(node, -					  "ibm,segment-page-sizes", &size); +	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);  	if (prop != NULL) { -		DBG("Page sizes from device-tree:\n"); +		pr_info("Page sizes from device-tree:\n");  		size /= 4; -		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE); +		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);  		while(size > 0) { -			unsigned int shift = prop[0]; -			unsigned int slbenc = prop[1]; -			unsigned int lpnum = prop[2]; -			unsigned int lpenc = 0; +			unsigned int base_shift = be32_to_cpu(prop[0]); +			unsigned int slbenc = be32_to_cpu(prop[1]); +			unsigned int lpnum = be32_to_cpu(prop[2]);  			struct mmu_psize_def *def; -			int idx = -1; +			int idx, base_idx;  			size -= 3; prop += 3; -			while(size > 0 && lpnum) { -				if (prop[0] == shift) -					lpenc = prop[1]; -				
prop += 2; size -= 2; -				lpnum--; -			} -			switch(shift) { -			case 0xc: -				idx = MMU_PAGE_4K; -				break; -			case 0x10: -				idx = MMU_PAGE_64K; -				break; -			case 0x14: -				idx = MMU_PAGE_1M; -				break; -			case 0x18: -				idx = MMU_PAGE_16M; -				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE; -				break; -			case 0x22: -				idx = MMU_PAGE_16G; -				break; -			} -			if (idx < 0) +			base_idx = get_idx_from_shift(base_shift); +			if (base_idx < 0) { +				/* +				 * skip the pte encoding also +				 */ +				prop += lpnum * 2; size -= lpnum * 2;  				continue; -			def = &mmu_psize_defs[idx]; -			def->shift = shift; -			if (shift <= 23) +			} +			def = &mmu_psize_defs[base_idx]; +			if (base_idx == MMU_PAGE_16M) +				cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; + +			def->shift = base_shift; +			if (base_shift <= 23)  				def->avpnm = 0;  			else -				def->avpnm = (1 << (shift - 23)) - 1; +				def->avpnm = (1 << (base_shift - 23)) - 1;  			def->sllp = slbenc; -			def->penc = lpenc; -			/* We don't know for sure what's up with tlbiel, so +			/* +			 * We don't know for sure what's up with tlbiel, so  			 * for now we only set it for 4K and 64K pages  			 */ -			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K) +			if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)  				def->tlbiel = 1;  			else  				def->tlbiel = 0; -			DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, " -			    "tlbiel=%d, penc=%d\n", -			    idx, shift, def->sllp, def->avpnm, def->tlbiel, -			    def->penc); +			while (size > 0 && lpnum) { +				unsigned int shift = be32_to_cpu(prop[0]); +				int penc  = be32_to_cpu(prop[1]); + +				prop += 2; size -= 2; +				lpnum--; + +				idx = get_idx_from_shift(shift); +				if (idx < 0) +					continue; + +				if (penc == -1) +					pr_err("Invalid penc for base_shift=%d " +					       "shift=%d\n", base_shift, shift); + +				def->penc[idx] = penc; +				pr_info("base_shift=%d: shift=%d, sllp=0x%04lx," +					" avpnm=0x%08lx, tlbiel=%d, penc=%d\n", +					base_shift, shift, def->sllp, +					def->avpnm, def->tlbiel, def->penc[idx]); +			}  		}  		return 1;  	} @@ -357,9 +406,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,  static int __init htab_dt_scan_hugepage_blocks(unsigned long node,  					const char *uname, int depth,  					void *data) { -	char *type = of_get_flat_dt_prop(node, "device_type", NULL); -	unsigned long *addr_prop; -	u32 *page_count_prop; +	const char *type = of_get_flat_dt_prop(node, "device_type", NULL); +	const __be64 *addr_prop; +	const __be32 *page_count_prop;  	unsigned int expected_pages;  	long unsigned int phys_addr;  	long unsigned int block_size; @@ -373,12 +422,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,  	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);  	if (page_count_prop == NULL)  		return 0; -	expected_pages = (1 << page_count_prop[0]); +	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));  	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);  	if (addr_prop == NULL)  		return 0; -	phys_addr = addr_prop[0]; -	block_size = addr_prop[1]; +	phys_addr = be64_to_cpu(addr_prop[0]); +	block_size = be64_to_cpu(addr_prop[1]);  	if (block_size != (16 * GB))  		return 0;  	printk(KERN_INFO "Huge page(16GB) memory: " @@ -392,10 +441,39 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,  }  #endif /* CONFIG_HUGETLB_PAGE */ +static void mmu_psize_set_default_penc(void) +{ +	int bpsize, apsize; +	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) +		for (apsize = 
0; apsize < MMU_PAGE_COUNT; apsize++) +			mmu_psize_defs[bpsize].penc[apsize] = -1; +} + +#ifdef CONFIG_PPC_64K_PAGES + +static bool might_have_hea(void) +{ +	/* +	 * The HEA ethernet adapter requires awareness of the +	 * GX bus. Without that awareness we can easily assume +	 * we will never see an HEA ethernet device. +	 */ +#ifdef CONFIG_IBMEBUS +	return !cpu_has_feature(CPU_FTR_ARCH_207S); +#else +	return false; +#endif +} + +#endif /* #ifdef CONFIG_PPC_64K_PAGES */ +  static void __init htab_init_page_sizes(void)  {  	int rc; +	/* se the invalid penc to -1 */ +	mmu_psize_set_default_penc(); +  	/* Default to 4K pages only */  	memcpy(mmu_psize_defs, mmu_psize_defaults_old,  	       sizeof(mmu_psize_defaults_old)); @@ -411,7 +489,7 @@ static void __init htab_init_page_sizes(void)  	 * Not in the device-tree, let's fallback on known size  	 * list for 16M capable GP & GR  	 */ -	if (cpu_has_feature(CPU_FTR_16M_PAGE)) +	if (mmu_has_feature(MMU_FTR_16M_PAGE))  		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,  		       sizeof(mmu_psize_defaults_gp));   found: @@ -441,12 +519,13 @@ static void __init htab_init_page_sizes(void)  		mmu_vmalloc_psize = MMU_PAGE_64K;  		if (mmu_linear_psize == MMU_PAGE_4K)  			mmu_linear_psize = MMU_PAGE_64K; -		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { +		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {  			/* -			 * Don't use 64k pages for ioremap on pSeries, since -			 * that would stop us accessing the HEA ethernet. +			 * When running on pSeries using 64k pages for ioremap +			 * would stop us accessing the HEA ethernet. So if we +			 * have the chance of ever seeing one, stay at 4k.  			 */ -			if (!machine_is(pseries)) +			if (!might_have_hea() || !machine_is(pseries))  				mmu_io_psize = MMU_PAGE_64K;  		} else  			mmu_ci_restrictions = 1; @@ -490,17 +569,17 @@ static int __init htab_dt_scan_pftsize(unsigned long node,  				       const char *uname, int depth,  				       void *data)  { -	char *type = of_get_flat_dt_prop(node, "device_type", NULL); -	u32 *prop; +	const char *type = of_get_flat_dt_prop(node, "device_type", NULL); +	const __be32 *prop;  	/* We are scanning "cpu" nodes only */  	if (type == NULL || strcmp(type, "cpu") != 0)  		return 0; -	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL); +	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);  	if (prop != NULL) {  		/* pft_size[0] is the NUMA CEC cookie */ -		ppc64_pft_size = prop[1]; +		ppc64_pft_size = be32_to_cpu(prop[1]);  		return 1;  	}  	return 0; @@ -533,11 +612,11 @@ static unsigned long __init htab_get_table_size(void)  }  #ifdef CONFIG_MEMORY_HOTPLUG -void create_section_mapping(unsigned long start, unsigned long end) +int create_section_mapping(unsigned long start, unsigned long end)  { -	BUG_ON(htab_bolt_mapping(start, end, __pa(start), +	return htab_bolt_mapping(start, end, __pa(start),  				 pgprot_val(PAGE_KERNEL), mmu_linear_psize, -				 mmu_kernel_ssize)); +				 mmu_kernel_ssize);  }  int remove_section_mapping(unsigned long start, unsigned long end) @@ -547,39 +626,44 @@ int remove_section_mapping(unsigned long start, unsigned long end)  }  #endif /* CONFIG_MEMORY_HOTPLUG */ -static inline void make_bl(unsigned int *insn_addr, void *func) -{ -	unsigned long funcp = *((unsigned long *)func); -	int offset = funcp - (unsigned long)insn_addr; - -	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc)); -	flush_icache_range((unsigned long)insn_addr, 4+ -			   (unsigned long)insn_addr); -} +extern u32 htab_call_hpte_insert1[]; +extern u32 
htab_call_hpte_insert2[]; +extern u32 htab_call_hpte_remove[]; +extern u32 htab_call_hpte_updatepp[]; +extern u32 ht64_call_hpte_insert1[]; +extern u32 ht64_call_hpte_insert2[]; +extern u32 ht64_call_hpte_remove[]; +extern u32 ht64_call_hpte_updatepp[];  static void __init htab_finish_init(void)  { -	extern unsigned int *htab_call_hpte_insert1; -	extern unsigned int *htab_call_hpte_insert2; -	extern unsigned int *htab_call_hpte_remove; -	extern unsigned int *htab_call_hpte_updatepp; -  #ifdef CONFIG_PPC_HAS_HASH_64K -	extern unsigned int *ht64_call_hpte_insert1; -	extern unsigned int *ht64_call_hpte_insert2; -	extern unsigned int *ht64_call_hpte_remove; -	extern unsigned int *ht64_call_hpte_updatepp; - -	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); -	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); -	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); -	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); +	patch_branch(ht64_call_hpte_insert1, +		ppc_function_entry(ppc_md.hpte_insert), +		BRANCH_SET_LINK); +	patch_branch(ht64_call_hpte_insert2, +		ppc_function_entry(ppc_md.hpte_insert), +		BRANCH_SET_LINK); +	patch_branch(ht64_call_hpte_remove, +		ppc_function_entry(ppc_md.hpte_remove), +		BRANCH_SET_LINK); +	patch_branch(ht64_call_hpte_updatepp, +		ppc_function_entry(ppc_md.hpte_updatepp), +		BRANCH_SET_LINK);  #endif /* CONFIG_PPC_HAS_HASH_64K */ -	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); -	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); -	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); -	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); +	patch_branch(htab_call_hpte_insert1, +		ppc_function_entry(ppc_md.hpte_insert), +		BRANCH_SET_LINK); +	patch_branch(htab_call_hpte_insert2, +		ppc_function_entry(ppc_md.hpte_insert), +		BRANCH_SET_LINK); +	patch_branch(htab_call_hpte_remove, +		ppc_function_entry(ppc_md.hpte_remove), +		BRANCH_SET_LINK); +	patch_branch(htab_call_hpte_updatepp, +		ppc_function_entry(ppc_md.hpte_updatepp), +		BRANCH_SET_LINK);  }  static void __init htab_initialize(void) @@ -598,7 +682,7 @@ static void __init htab_initialize(void)  	/* Initialize page sizes */  	htab_init_page_sizes(); -	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { +	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {  		mmu_kernel_ssize = MMU_SEGSIZE_1T;  		mmu_highuser_ssize = MMU_SEGSIZE_1T;  		printk(KERN_INFO "Using 1TB segments\n"); @@ -617,6 +701,16 @@ static void __init htab_initialize(void)  		/* Using a hypervisor which owns the htab */  		htab_address = NULL;  		_SDR1 = 0;  +#ifdef CONFIG_FA_DUMP +		/* +		 * If firmware assisted dump is active firmware preserves +		 * the contents of htab along with entire partition memory. +		 * Clear the htab if firmware assisted dump is active so +		 * that we dont end up using old mappings. +		 */ +		if (is_fadump_active() && ppc_md.hpte_clear_all) +			ppc_md.hpte_clear_all(); +#endif  	} else {  		/* Find storage for the HPT.  Must be contiguous in  		 * the absolute address space. 
On cell we want it to be @@ -632,7 +726,7 @@ static void __init htab_initialize(void)  		DBG("Hash table allocated at %lx, size: %lx\n", table,  		    htab_size_bytes); -		htab_address = abs_to_virt(table); +		htab_address = __va(table);  		/* htab absolute addr + encoded htabsize */  		_SDR1 = table + __ilog2(pteg_count) - 11; @@ -737,26 +831,24 @@ void __init early_init_mmu(void)  	 */  	htab_initialize(); -	/* Initialize stab / SLB management except on iSeries -	 */ -	if (cpu_has_feature(CPU_FTR_SLB)) +	/* Initialize stab / SLB management */ +	if (mmu_has_feature(MMU_FTR_SLB))  		slb_initialize(); -	else if (!firmware_has_feature(FW_FEATURE_ISERIES)) +	else  		stab_initialize(get_paca()->stab_real);  }  #ifdef CONFIG_SMP -void __cpuinit early_init_mmu_secondary(void) +void early_init_mmu_secondary(void)  {  	/* Initialize hash table for that CPU */  	if (!firmware_has_feature(FW_FEATURE_LPAR))  		mtspr(SPRN_SDR1, _SDR1);  	/* Initialize STAB/SLB. We use a virtual address as it works -	 * in real mode on pSeries and we want a virutal address on -	 * iSeries anyway +	 * in real mode on pSeries.  	 */ -	if (cpu_has_feature(CPU_FTR_SLB)) +	if (mmu_has_feature(MMU_FTR_SLB))  		slb_initialize();  	else  		stab_initialize(get_paca()->stab_addr); @@ -789,16 +881,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)  #ifdef CONFIG_PPC_MM_SLICES  unsigned int get_paca_psize(unsigned long addr)  { -	unsigned long index, slices; +	u64 lpsizes; +	unsigned char *hpsizes; +	unsigned long index, mask_index;  	if (addr < SLICE_LOW_TOP) { -		slices = get_paca()->context.low_slices_psize; +		lpsizes = get_paca()->context.low_slices_psize;  		index = GET_LOW_SLICE_INDEX(addr); -	} else { -		slices = get_paca()->context.high_slices_psize; -		index = GET_HIGH_SLICE_INDEX(addr); +		return (lpsizes >> (index * 4)) & 0xF;  	} -	return (slices >> (index * 4)) & 0xF; +	hpsizes = get_paca()->context.high_slices_psize; +	index = GET_HIGH_SLICE_INDEX(addr); +	mask_index = index & 0x1; +	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;  }  #else @@ -844,7 +939,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)  	if (ea >= spt->maxaddr)  		return 0; -	if (ea < 0x100000000) { +	if (ea < 0x100000000UL) {  		/* addresses below 4GB use spt->low_prot */  		sbpm = spt->low_prot;  	} else { @@ -874,14 +969,30 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)  void hash_failure_debug(unsigned long ea, unsigned long access,  			unsigned long vsid, unsigned long trap, -			int ssize, int psize, unsigned long pte) +			int ssize, int psize, int lpsize, unsigned long pte)  {  	if (!printk_ratelimit())  		return;  	pr_info("mm: Hashing failure ! 
EA=0x%lx access=0x%lx current=%s\n",  		ea, access, current->comm); -	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n", -		trap, vsid, ssize, psize, pte); +	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n", +		trap, vsid, ssize, psize, lpsize, pte); +} + +static void check_paca_psize(unsigned long ea, struct mm_struct *mm, +			     int psize, bool user_region) +{ +	if (user_region) { +		if (psize != get_paca_psize(ea)) { +			get_paca()->context = mm->context; +			slb_flush_and_rebolt(); +		} +	} else if (get_paca()->vmalloc_sllp != +		   mmu_psize_defs[mmu_vmalloc_psize].sllp) { +		get_paca()->vmalloc_sllp = +			mmu_psize_defs[mmu_vmalloc_psize].sllp; +		slb_vmalloc_update(); +	}  }  /* Result code is: @@ -892,6 +1003,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,   */  int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  { +	enum ctx_state prev_state = exception_enter();  	pgd_t *pgdir;  	unsigned long vsid;  	struct mm_struct *mm; @@ -904,11 +1016,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",  		ea, access, trap); -	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { -		DBG_LOW(" out of pgtable range !\n"); - 		return 1; -	} -  	/* Get region & vsid */   	switch (REGION_ID(ea)) {  	case USER_REGION_ID: @@ -916,7 +1023,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  		mm = current->mm;  		if (! mm) {  			DBG_LOW(" user region with no mm !\n"); -			return 1; +			rc = 1; +			goto bail;  		}  		psize = get_slice_psize(mm, ea);  		ssize = user_segment_size(ea); @@ -935,14 +1043,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  		/* Not a valid range  		 * Send the problem up to do_page_fault   		 */ -		return 1; +		rc = 1; +		goto bail;  	}  	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); +	/* Bad address. */ +	if (!vsid) { +		DBG_LOW("Bad address!\n"); +		rc = 1; +		goto bail; +	}  	/* Get pgdir */  	pgdir = mm->pgd; -	if (pgdir == NULL) -		return 1; +	if (pgdir == NULL) { +		rc = 1; +		goto bail; +	}  	/* Check CPU locality */  	tmp = cpumask_of(smp_processor_id()); @@ -965,7 +1082,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);  	if (ptep == NULL || !pte_present(*ptep)) {  		DBG_LOW(" no PTE !\n"); -		return 1; +		rc = 1; +		goto bail;  	}  	/* Add _PAGE_PRESENT to the required access perm */ @@ -976,14 +1094,32 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  	 */  	if (access & ~pte_val(*ptep)) {  		DBG_LOW(" no access !\n"); -		return 1; +		rc = 1; +		goto bail;  	} +	if (hugeshift) { +		if (pmd_trans_huge(*(pmd_t *)ptep)) +			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, +					     trap, local, ssize, psize);  #ifdef CONFIG_HUGETLB_PAGE -	if (hugeshift) -		return __hash_page_huge(ea, access, vsid, ptep, trap, local, -					ssize, hugeshift, psize); -#endif /* CONFIG_HUGETLB_PAGE */ +		else +			rc = __hash_page_huge(ea, access, vsid, ptep, trap, +					      local, ssize, hugeshift, psize); +#else +		else { +			/* +			 * if we have hugeshift, and is not transhuge with +			 * hugetlb disabled, something is really wrong. 
+			 */ +			rc = 1; +			WARN_ON(1); +		} +#endif +		check_paca_psize(ea, mm, psize, user_region); + +		goto bail; +	}  #ifndef CONFIG_PPC_64K_PAGES  	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); @@ -1022,17 +1158,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  #endif  		}  	} -	if (user_region) { -		if (psize != get_paca_psize(ea)) { -			get_paca()->context = mm->context; -			slb_flush_and_rebolt(); -		} -	} else if (get_paca()->vmalloc_sllp != -		   mmu_psize_defs[mmu_vmalloc_psize].sllp) { -		get_paca()->vmalloc_sllp = -			mmu_psize_defs[mmu_vmalloc_psize].sllp; -		slb_vmalloc_update(); -	} + +	check_paca_psize(ea, mm, psize, user_region);  #endif /* CONFIG_PPC_64K_PAGES */  #ifdef CONFIG_PPC_HAS_HASH_64K @@ -1054,7 +1181,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  	 */  	if (rc == -1)  		hash_failure_debug(ea, access, vsid, trap, ssize, psize, -				   pte_val(*ptep)); +				   psize, pte_val(*ptep));  #ifndef CONFIG_PPC_64K_PAGES  	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));  #else @@ -1062,6 +1189,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)  		pte_val(*(ptep + PTRS_PER_PTE)));  #endif  	DBG_LOW(" -> rc=%d\n", rc); + +bail: +	exception_exit(prev_state);  	return rc;  }  EXPORT_SYMBOL_GPL(hash_page); @@ -1069,8 +1199,9 @@ EXPORT_SYMBOL_GPL(hash_page);  void hash_preload(struct mm_struct *mm, unsigned long ea,  		  unsigned long access, unsigned long trap)  { +	int hugepage_shift;  	unsigned long vsid; -	void *pgdir; +	pgd_t *pgdir;  	pte_t *ptep;  	unsigned long flags;  	int rc, ssize, local = 0; @@ -1090,10 +1221,27 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,  	pgdir = mm->pgd;  	if (pgdir == NULL)  		return; -	ptep = find_linux_pte(pgdir, ea); -	if (!ptep) + +	/* Get VSID */ +	ssize = user_segment_size(ea); +	vsid = get_vsid(mm->context.id, ea, ssize); +	if (!vsid)  		return; +	/* +	 * Hash doesn't like irqs. Walking linux page table with irq disabled +	 * saves us from holding multiple locks. +	 */ +	local_irq_save(flags); + +	/* +	 * THP pages use update_mmu_cache_pmd. We don't do +	 * hash preload there. Hence can ignore THP here +	 */ +	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift); +	if (!ptep) +		goto out_exit; +	WARN_ON(hugepage_shift);  #ifdef CONFIG_PPC_64K_PAGES  	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on  	 * a 64K kernel), then we don't preload, hash_page() will take @@ -1102,16 +1250,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,  	 * page size demotion here  	 */  	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE)) -		return; +		goto out_exit;  #endif /* CONFIG_PPC_64K_PAGES */ -	/* Get VSID */ -	ssize = user_segment_size(ea); -	vsid = get_vsid(mm->context.id, ea, ssize); - -	/* Hash doesn't like irqs */ -	local_irq_save(flags); -  	/* Is that local to this CPU ? 
*/  	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))  		local = 1; @@ -1123,37 +1264,59 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,  	else  #endif /* CONFIG_PPC_HAS_HASH_64K */  		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, -				    subpage_protection(pgdir, ea)); +				    subpage_protection(mm, ea));  	/* Dump some info in case of hash insertion failure, they should  	 * never happen so it is really useful to know if/when they do  	 */  	if (rc == -1)  		hash_failure_debug(ea, access, vsid, trap, ssize, -				   mm->context.user_psize, pte_val(*ptep)); - +				   mm->context.user_psize, +				   mm->context.user_psize, +				   pte_val(*ptep)); +out_exit:  	local_irq_restore(flags);  }  /* WARNING: This is called from hash_low_64.S, if you change this prototype,   *          do not forget to update the assembly call site !   */ -void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, +void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,  		     int local)  {  	unsigned long hash, index, shift, hidx, slot; -	DBG_LOW("flush_hash_page(va=%016lx)\n", va); -	pte_iterate_hashed_subpages(pte, psize, va, index, shift) { -		hash = hpt_hash(va, shift, ssize); +	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); +	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { +		hash = hpt_hash(vpn, shift, ssize);  		hidx = __rpte_to_hidx(pte, index);  		if (hidx & _PTEIDX_SECONDARY)  			hash = ~hash;  		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;  		slot += hidx & _PTEIDX_GROUP_IX;  		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); -		ppc_md.hpte_invalidate(slot, va, psize, ssize, local); +		/* +		 * We use same base page size and actual psize, because we don't +		 * use these functions for hugepage +		 */ +		ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);  	} pte_iterate_hashed_end(); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +	/* Transactions are not aborted by tlbiel, only tlbie. +	 * Without, syncing a page back to a block device w/ PIO could pick up +	 * transactional data (bad!) so we force an abort here.  Before the +	 * sync the page will be made read-only, which will flush_hash_page. +	 * BIG ISSUE here: if the kernel uses a page from userspace without +	 * unmapping it first, it may see the speculated version. 
+	 */ +	if (local && cpu_has_feature(CPU_FTR_TM) && +	    current->thread.regs && +	    MSR_TM_ACTIVE(current->thread.regs->msr)) { +		tm_enable(); +		tm_abort(TM_CAUSE_TLBI); +	} +#endif  }  void flush_hash_range(unsigned long number, int local) @@ -1166,7 +1329,7 @@ void flush_hash_range(unsigned long number, int local)  			&__get_cpu_var(ppc64_tlb_batch);  		for (i = 0; i < number; i++) -			flush_hash_page(batch->vaddr[i], batch->pte[i], +			flush_hash_page(batch->vpn[i], batch->pte[i],  					batch->psize, batch->ssize, local);  	}  } @@ -1177,6 +1340,8 @@ void flush_hash_range(unsigned long number, int local)   */  void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)  { +	enum ctx_state prev_state = exception_enter(); +  	if (user_mode(regs)) {  #ifdef CONFIG_PPC_SUBPAGE_PROT  		if (rc == -2) @@ -1186,23 +1351,64 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)  			_exception(SIGBUS, regs, BUS_ADRERR, address);  	} else  		bad_page_fault(regs, address, SIGBUS); + +	exception_exit(prev_state); +} + +long hpte_insert_repeating(unsigned long hash, unsigned long vpn, +			   unsigned long pa, unsigned long rflags, +			   unsigned long vflags, int psize, int ssize) +{ +	unsigned long hpte_group; +	long slot; + +repeat: +	hpte_group = ((hash & htab_hash_mask) * +		       HPTES_PER_GROUP) & ~0x7UL; + +	/* Insert into the hash table, primary slot */ +	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags, +				  psize, psize, ssize); + +	/* Primary is full, try the secondary */ +	if (unlikely(slot == -1)) { +		hpte_group = ((~hash & htab_hash_mask) * +			      HPTES_PER_GROUP) & ~0x7UL; +		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, +					  vflags | HPTE_V_SECONDARY, +					  psize, psize, ssize); +		if (slot == -1) { +			if (mftb() & 0x1) +				hpte_group = ((hash & htab_hash_mask) * +					      HPTES_PER_GROUP)&~0x7UL; + +			ppc_md.hpte_remove(hpte_group); +			goto repeat; +		} +	} + +	return slot;  }  #ifdef CONFIG_DEBUG_PAGEALLOC  static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)  { -	unsigned long hash, hpteg; +	unsigned long hash;  	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); -	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); +	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);  	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); -	int ret; +	long ret; + +	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); + +	/* Don't create HPTE entries for bad address */ +	if (!vsid) +		return; -	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); -	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); +	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, +				    HPTE_V_BOLTED, +				    mmu_linear_psize, mmu_kernel_ssize); -	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), -				 mode, HPTE_V_BOLTED, -				 mmu_linear_psize, mmu_kernel_ssize);  	BUG_ON (ret < 0);  	spin_lock(&linear_map_hash_lock);  	BUG_ON(linear_map_hash_slots[lmi] & 0x80); @@ -1214,9 +1420,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)  {  	unsigned long hash, hidx, slot;  	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); -	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); +	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); -	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); +	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);  	spin_lock(&linear_map_hash_lock);  	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));  	hidx = 
linear_map_hash_slots[lmi] & 0x7f; @@ -1226,7 +1432,8 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)  		hash = ~hash;  	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;  	slot += hidx & _PTEIDX_GROUP_IX; -	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); +	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize, +			       mmu_kernel_ssize, 0);  }  void kernel_map_pages(struct page *page, int numpages, int enable)  | 
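
Notes on selected changes in this patch:

The penc field of struct mmu_psize_def changes from a single value to an array indexed by the actual page size, with every unsupported (base, actual) combination set to -1, both in the static tables via GCC range designated initializers and at runtime in mmu_psize_set_default_penc(). This presumably prepares for actual page sizes that differ from a segment's base page size; the new lpsize argument to hash_failure_debug() points the same way. The standalone sketch below only illustrates the range-initializer idiom used in mmu_psize_defaults_old/_gp; the enum values and the struct are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's page-size indices. */
enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

struct psize_def {
	unsigned int shift;
	int penc[MMU_PAGE_COUNT];	/* HPTE encoding per actual page size */
};

/* GCC/Clang range designated initializers: index 0 (4K page in a 4K
 * segment) gets encoding 0, every other actual page size is marked
 * invalid (-1), mirroring the .penc line added to the default tables. */
static struct psize_def defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
		.penc  = { [MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1 },
	},
};

int main(void)
{
	for (int actual = 0; actual < MMU_PAGE_COUNT; actual++)
		printf("base 4K, actual %d -> penc %d\n",
		       actual, defs[MMU_PAGE_4K].penc[actual]);
	return 0;
}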
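The device-tree scanners (htab_dt_scan_seg_sizes, htab_dt_scan_page_sizes, htab_dt_scan_hugepage_blocks, htab_dt_scan_pftsize) now declare the properties as const __be32/__be64 and convert each cell with be32_to_cpu()/be64_to_cpu(), so the parsing also works on little-endian hosts. Below is a rough userspace illustration of walking an "ibm,segment-page-sizes"-style cell array; the layout (base shift, SLB encoding, sub-entry count, then that many shift/penc pairs) follows the loop in htab_dt_scan_page_sizes() above, while the sample bytes, the be32_read() helper and the printed values are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Device-tree properties are big-endian byte streams; this is what
 * be32_to_cpu() effectively does on a little-endian host. */
static uint32_t be32_read(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Invented property: base_shift=0xc (4K), slbenc=0, two sub-entries
 * {shift=0xc, penc=0} and {shift=0x10, penc=7}. */
static const unsigned char prop[] = {
	0x00,0x00,0x00,0x0c,  0x00,0x00,0x00,0x00,  0x00,0x00,0x00,0x02,
	0x00,0x00,0x00,0x0c,  0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x10,  0x00,0x00,0x00,0x07,
};

int main(void)
{
	int size = sizeof(prop) / 4;		/* number of 32-bit cells */
	const unsigned char *p = prop;

	while (size > 0) {
		unsigned int base_shift = be32_read(p + 0);
		unsigned int slbenc     = be32_read(p + 4);
		unsigned int lpnum      = be32_read(p + 8);

		p += 12; size -= 3;
		printf("base_shift=0x%x slbenc=%u sub-entries=%u\n",
		       base_shift, slbenc, lpnum);

		while (size > 0 && lpnum) {
			printf("  shift=0x%x penc=%d\n",
			       be32_read(p), (int)be32_read(p + 4));
			p += 8; size -= 2;
			lpnum--;
		}
	}
	return 0;
}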
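The hand-rolled make_bl() helper is dropped in favour of patch_branch() and ppc_function_entry() from asm/code-patching.h; the latter also resolves the function-descriptor indirection that the old code handled by dereferencing the function pointer. For reference, the removed helper encoded a relative "bl" directly; a minimal standalone sketch of that encoding (opcode 0x48000001, an I-form branch with LK=1, plus the word-aligned displacement), using made-up addresses, is:

#include <stdio.h>
#include <stdint.h>

/* Build a PowerPC "bl target" to be placed at insn_addr, the way the
 * removed make_bl() did: OR the signed, word-aligned displacement into
 * the LI field of 0x48000001. Like the removed helper, no range check;
 * the target is assumed to be within +/- 32 MB. */
static uint32_t make_bl_insn(uint64_t insn_addr, uint64_t target)
{
	int64_t offset = (int64_t)(target - insn_addr);

	return (uint32_t)(0x48000001 | (offset & 0x03fffffc));
}

int main(void)
{
	/* Hypothetical kernel addresses, purely to show the arithmetic. */
	printf("bl encoding: 0x%08x\n",
	       (unsigned int)make_bl_insn(0xc000000000001000ULL,
					  0xc000000000002000ULL));
	return 0;
}

Expected output here is 0x48001001: a branch forward by 0x1000 with the link bit set.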
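In get_paca_psize(), the high-address slices now come from context.high_slices_psize, an array of bytes holding two 4-bit page-size encodings each: index >> 1 picks the byte and index & 1 picks the nibble. A toy sketch of that packing follows; the slice count and the set_slice_psize()/get_slice_psize() helper names are invented for illustration, not kernel functions.

#include <stdio.h>

/* Two 4-bit page-size encodings per byte, as in high_slices_psize.
 * 8 bytes -> 16 slices in this toy example. */
static unsigned char high_slices[8];

static void set_slice_psize(unsigned long index, unsigned int psize)
{
	unsigned long mask_index = index & 0x1;

	high_slices[index >> 1] &= ~(0xf << (mask_index * 4));
	high_slices[index >> 1] |= (psize & 0xf) << (mask_index * 4);
}

static unsigned int get_slice_psize(unsigned long index)
{
	unsigned long mask_index = index & 0x1;

	/* Same shift/mask arithmetic as the new get_paca_psize(). */
	return (high_slices[index >> 1] >> (mask_index * 4)) & 0xf;
}

int main(void)
{
	set_slice_psize(5, 4);	/* pretend slice 5 uses page-size index 4 */
	printf("slice 5 psize = %u\n", get_slice_psize(5));
	return 0;
}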
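The new hpte_insert_repeating() and the DEBUG_PAGEALLOC unmap path both derive the primary PTE group from the hash as (hash & htab_hash_mask) * HPTES_PER_GROUP, and the secondary group from the complemented hash; when both groups are full, hpte_insert_repeating() evicts an entry from a pseudo-randomly chosen group (the low timebase bit decides primary vs. secondary) and retries. A toy illustration of just the index arithmetic, with an invented table size and hash value, and assuming the usual HPTES_PER_GROUP of 8:

#include <stdio.h>

#define HPTES_PER_GROUP	8	/* assumed value for 64-bit hash MMUs */

int main(void)
{
	/* Invented: a hash table with 2^18 PTE groups and an arbitrary hash. */
	unsigned long htab_hash_mask = (1UL << 18) - 1;
	unsigned long hash = 0xdeadbeefUL;

	unsigned long primary   = ((hash  & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
	unsigned long secondary = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

	printf("primary group slot   = 0x%lx\n", primary);
	printf("secondary group slot = 0x%lx\n", secondary);
	return 0;
}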
