Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 167
1 file changed, 104 insertions, 63 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bde8b558975..88fdd9d2507 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	if ((pteflags & _PAGE_USER) &&
 	    !((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
 		rflags |= 1;
-
-	/* Always add C */
-	return rflags | HPTE_R_C;
+	/*
+	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 */
+	return rflags | HPTE_R_C | HPTE_R_M;
 }
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
@@ -206,6 +207,24 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
+		/*
+		 * If relocatable, check if it overlaps interrupt vectors that
+		 * are copied down to real 0. For relocatable kernel
+		 * (e.g. kdump case) we copy interrupt vectors down to real
+		 * address 0. Mark that region as executable. This is
+		 * because on p8 system with relocation on exception feature
+		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
+		 * in order to execute the interrupt handlers in virtual
+		 * mode the vector region need to be marked as executable.
+		 */
+		if ((PHYSICAL_START > MEMORY_START) &&
+		    overlaps_interrupt_vector_text(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
 		hash = hpt_hash(vpn, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
@@ -250,20 +269,19 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 					 const char *uname, int depth,
 					 void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	u32 *prop;
-	unsigned long size = 0;
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const __be32 *prop;
+	int size = 0;
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
 		return 0;
 
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
-					  &size);
+	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
 	if (prop == NULL)
 		return 0;
 	for (; size >= 4; size -= 4, ++prop) {
-		if (prop[0] == 40) {
+		if (be32_to_cpu(prop[0]) == 40) {
 			DBG("1T segment support detected\n");
 			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
 			return 1;
@@ -306,24 +324,23 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	u32 *prop;
-	unsigned long size = 0;
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const __be32 *prop;
+	int size = 0;
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
 		return 0;
 
-	prop = (u32 *)of_get_flat_dt_prop(node,
-					  "ibm,segment-page-sizes", &size);
+	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
 	if (prop != NULL) {
 		pr_info("Page sizes from device-tree:\n");
 		size /= 4;
 		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
 		while(size > 0) {
-			unsigned int base_shift = prop[0];
-			unsigned int slbenc = prop[1];
-			unsigned int lpnum = prop[2];
+			unsigned int base_shift = be32_to_cpu(prop[0]);
+			unsigned int slbenc = be32_to_cpu(prop[1]);
+			unsigned int lpnum = be32_to_cpu(prop[2]);
 			struct mmu_psize_def *def;
 			int idx, base_idx;
 
@@ -356,8 +373,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 				def->tlbiel = 0;
 
 			while (size > 0 && lpnum) {
-				unsigned int shift = prop[0];
-				int penc  = prop[1];
+				unsigned int shift = be32_to_cpu(prop[0]);
+				int penc  = be32_to_cpu(prop[1]);
 
 				prop += 2; size -= 2;
 				lpnum--;
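The three device-tree scanner hunks above all apply the same fix: flattened device-tree properties are stored big-endian, so dereferencing them as plain u32 only happens to work on a big-endian host. Typing the pointers as __be32 and converting each cell with be32_to_cpu() makes the scanners correct on little-endian kernels too, and lets sparse flag any raw access. A minimal sketch of the idiom follows; demo_scan_segment_sizes is a hypothetical scanner, while of_get_flat_dt_prop(), __be32 and be32_to_cpu() are the real kernel interfaces used in the hunks:

	#include <linux/of_fdt.h>	/* of_get_flat_dt_prop() */
	#include <asm/byteorder.h>	/* be32_to_cpu() */

	/* Hypothetical scanner: walk a big-endian cell array safely. */
	static int __init demo_scan_segment_sizes(unsigned long node)
	{
		int size = 0;
		const __be32 *prop;

		prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					   &size);
		if (prop == NULL)
			return 0;

		/* size is in bytes; each cell is a 4-byte big-endian value. */
		for (; size >= 4; size -= 4, ++prop) {
			if (be32_to_cpu(*prop) == 40)	/* shift 40 => 1T segments */
				return 1;
		}
		return 0;
	}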
@@ -389,9 +406,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 					const char *uname, int depth,
 					void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	unsigned long *addr_prop;
-	u32 *page_count_prop;
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const __be64 *addr_prop;
+	const __be32 *page_count_prop;
 	unsigned int expected_pages;
 	long unsigned int phys_addr;
 	long unsigned int block_size;
@@ -405,12 +422,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
 	if (page_count_prop == NULL)
 		return 0;
-	expected_pages = (1 << page_count_prop[0]);
+	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
 	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
 	if (addr_prop == NULL)
 		return 0;
-	phys_addr = addr_prop[0];
-	block_size = addr_prop[1];
+	phys_addr = be64_to_cpu(addr_prop[0]);
+	block_size = be64_to_cpu(addr_prop[1]);
 	if (block_size != (16 * GB))
 		return 0;
 	printk(KERN_INFO "Huge page(16GB) memory: "
@@ -432,6 +449,24 @@ static void mmu_psize_set_default_penc(void)
 			mmu_psize_defs[bpsize].penc[apsize] = -1;
 }
 
+#ifdef CONFIG_PPC_64K_PAGES
+
+static bool might_have_hea(void)
+{
+	/*
+	 * The HEA ethernet adapter requires awareness of the
+	 * GX bus. Without that awareness we can easily assume
+	 * we will never see an HEA ethernet device.
+	 */
+#ifdef CONFIG_IBMEBUS
+	return !cpu_has_feature(CPU_FTR_ARCH_207S);
+#else
+	return false;
+#endif
+}
+
+#endif /* #ifdef CONFIG_PPC_64K_PAGES */
+
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -486,10 +521,11 @@ static void __init htab_init_page_sizes(void)
 		mmu_linear_psize = MMU_PAGE_64K;
 		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
 			/*
-			 * Don't use 64k pages for ioremap on pSeries, since
-			 * that would stop us accessing the HEA ethernet.
+			 * When running on pSeries using 64k pages for ioremap
+			 * would stop us accessing the HEA ethernet. So if we
+			 * have the chance of ever seeing one, stay at 4k.
 			 */
-			if (!machine_is(pseries))
+			if (!might_have_hea() || !machine_is(pseries))
 				mmu_io_psize = MMU_PAGE_64K;
 		} else
 			mmu_ci_restrictions = 1;
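The two HEA hunks combine into one decision: 64k ioremap is now used unless an HEA adapter could plausibly show up, which per the new comment requires the IBM eBus/GX support to be compiled in (CONFIG_IBMEBUS) and a CPU that predates ISA 2.07, i.e. pre-POWER8. A self-contained sketch of the resulting condition, where demo_pick_io_page_size and its plain-boolean parameters are illustrative stand-ins for cpu_has_feature(), machine_is() and the config option:

	#include <stdbool.h>

	/* Condensed, hypothetical form of the ioremap page-size choice above. */
	static int demo_pick_io_page_size(bool ibmebus_built_in, bool pre_power8,
					  bool on_pseries)
	{
		/* mirrors might_have_hea() from the hunk above */
		bool hea_possible = ibmebus_built_in && pre_power8;

		if (!hea_possible || !on_pseries)
			return 64;	/* kilobytes: 64k ioremap is safe */
		return 4;		/* an HEA may appear: stay at 4k */
	}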
@@ -533,17 +569,17 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
 				       const char *uname, int depth,
 				       void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	u32 *prop;
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const __be32 *prop;
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
 		return 0;
 
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
 	if (prop != NULL) {
 		/* pft_size[0] is the NUMA CEC cookie */
-		ppc64_pft_size = prop[1];
+		ppc64_pft_size = be32_to_cpu(prop[1]);
 		return 1;
 	}
 	return 0;
@@ -590,47 +626,43 @@ int remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#define FUNCTION_TEXT(A)	((*(unsigned long *)(A)))
+extern u32 htab_call_hpte_insert1[];
+extern u32 htab_call_hpte_insert2[];
+extern u32 htab_call_hpte_remove[];
+extern u32 htab_call_hpte_updatepp[];
+extern u32 ht64_call_hpte_insert1[];
+extern u32 ht64_call_hpte_insert2[];
+extern u32 ht64_call_hpte_remove[];
+extern u32 ht64_call_hpte_updatepp[];
 
 static void __init htab_finish_init(void)
 {
-	extern unsigned int *htab_call_hpte_insert1;
-	extern unsigned int *htab_call_hpte_insert2;
-	extern unsigned int *htab_call_hpte_remove;
-	extern unsigned int *htab_call_hpte_updatepp;
-
 #ifdef CONFIG_PPC_HAS_HASH_64K
-	extern unsigned int *ht64_call_hpte_insert1;
-	extern unsigned int *ht64_call_hpte_insert2;
-	extern unsigned int *ht64_call_hpte_remove;
-	extern unsigned int *ht64_call_hpte_updatepp;
-
 	patch_branch(ht64_call_hpte_insert1,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_insert2,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_remove,
-		FUNCTION_TEXT(ppc_md.hpte_remove),
+		ppc_function_entry(ppc_md.hpte_remove),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_updatepp,
-		FUNCTION_TEXT(ppc_md.hpte_updatepp),
+		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-
 #endif /* CONFIG_PPC_HAS_HASH_64K */
 
 	patch_branch(htab_call_hpte_insert1,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_insert2,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_remove,
-		FUNCTION_TEXT(ppc_md.hpte_remove),
+		ppc_function_entry(ppc_md.hpte_remove),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_updatepp,
-		FUNCTION_TEXT(ppc_md.hpte_updatepp),
+		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
 }
 
@@ -947,6 +979,22 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
 		trap, vsid, ssize, psize, lpsize, pte);
 }
 
+static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
+			     int psize, bool user_region)
+{
+	if (user_region) {
+		if (psize != get_paca_psize(ea)) {
+			get_paca()->context = mm->context;
+			slb_flush_and_rebolt();
+		}
+	} else if (get_paca()->vmalloc_sllp !=
+		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+		get_paca()->vmalloc_sllp =
+			mmu_psize_defs[mmu_vmalloc_psize].sllp;
+		slb_vmalloc_update();
+	}
+}
+
 /* Result code is:
  *	0 - handled
  *	1 - normal page fault
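Two things happen in the htab_finish_init() hunk: the patch sites become u32 arrays, matching what patch_branch() actually targets (instruction words), and the hand-rolled FUNCTION_TEXT() macro gives way to ppc_function_entry(). The macro only worked because under the ELFv1 64-bit ABI a function pointer points at a descriptor whose first doubleword is the code address; ppc_function_entry() encapsulates that and also handles ABIs without descriptors. A sketch of the ELFv1 case the macro relied on; the struct shown is the standard ELFv1 descriptor layout as illustration, not the kernel's exact definition:

	/* ELFv1 procedure descriptor: what a function pointer really
	 * points at on 64-bit big-endian PowerPC. */
	struct demo_func_desc {
		unsigned long entry;	/* address of the first instruction */
		unsigned long toc;	/* TOC base the callee expects in r2 */
		unsigned long env;	/* environment pointer (unused by C) */
	};

	static unsigned long demo_function_entry(void *func)
	{
		/* equivalent to the removed FUNCTION_TEXT(): *(unsigned long *)func */
		return ((struct demo_func_desc *)func)->entry;
	}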
@@ -1068,6 +1116,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			WARN_ON(1);
 		}
 #endif
+		check_paca_psize(ea, mm, psize, user_region);
+
 		goto bail;
 	}
 
@@ -1108,17 +1158,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #endif
 		}
 	}
-	if (user_region) {
-		if (psize != get_paca_psize(ea)) {
-			get_paca()->context = mm->context;
-			slb_flush_and_rebolt();
-		}
-	} else if (get_paca()->vmalloc_sllp !=
-		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-		get_paca()->vmalloc_sllp =
-			mmu_psize_defs[mmu_vmalloc_psize].sllp;
-		slb_vmalloc_update();
-	}
+
+	check_paca_psize(ea, mm, psize, user_region);
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
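These last two hunks are the payoff for check_paca_psize(): hash_page() previously ran the paca/SLB consistency check inline only on its normal exit, so the huge-page path that leaves via "goto bail" could return with a stale segment page size recorded in the paca. Hoisting the check into a helper lets both exits share it. The shape of the refactor, as a self-contained sketch with demo_* names that are stand-ins rather than kernel symbols:

	#include <stdbool.h>

	struct demo_ctx { int paca_psize; };

	static void demo_check_psize(struct demo_ctx *ctx, int psize)
	{
		if (ctx->paca_psize != psize) {
			ctx->paca_psize = psize;
			/* the real helper flushes and rebolts the SLB here */
		}
	}

	static int demo_hash_page(struct demo_ctx *ctx, int psize, bool huge)
	{
		if (huge) {
			demo_check_psize(ctx, psize);	/* check was missing here */
			return 0;			/* the "goto bail" path */
		}
		demo_check_psize(ctx, psize);		/* the normal exit path */
		return 0;
	}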
