Diffstat (limited to 'arch/powerpc/include/asm/pte-hash64-64k.h')
-rw-r--r--	arch/powerpc/include/asm/pte-hash64-64k.h | 62
1 file changed, 14 insertions, 48 deletions
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372..d836d945068 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -22,8 +22,8 @@
 #define _PAGE_HASHPTE	_PAGE_HPTE_SUB
 
 /* Note the full page bits must be in the same location as for normal
- * 4k pages as the same asssembly will be used to insert 64K pages
- * wether the kernel has CONFIG_PPC_64K_PAGES or not
+ * 4k pages as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
  */
 #define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
 #define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), (pte_val(e) & _PAGE_COMBO) ? \
+		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)
@@ -57,14 +58,16 @@
 /* Trick: we set __end to va + 64k, which happens works for
  * a 16M page as well as we want only one iteration
  */
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	    \
-	do {								    \
-		unsigned long __end = va + PAGE_SIZE;			    \
-		unsigned __split = (psize == MMU_PAGE_4K ||		    \
-				    psize == MMU_PAGE_64K_AP);		    \
-		shift = mmu_psize_defs[psize].shift;			    \
-		for (index = 0; va < __end; index++, va += (1L << shift)) { \
-			if (!__split || __rpte_sub_valid(rpte, index)) do { \
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
+	do {								\
+		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
+		unsigned __split = (psize == MMU_PAGE_4K ||		\
+				    psize == MMU_PAGE_64K_AP);		\
+		shift = mmu_psize_defs[psize].shift;			\
+		for (index = 0; vpn < __end; index++,			\
+			     vpn += (1L << (shift - VPN_SHIFT))) {	\
+			if (!__split || __rpte_sub_valid(rpte, index))	\
+				do {
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
 
@@ -75,41 +78,4 @@
 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
 			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
 
-
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these.  Basically we have a 3-level tree, with the top level being
- * the protptrs array.  To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k).  For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
-	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
-	unsigned int *low_prot[4];
-};
-
-#undef PGD_TABLE_SIZE
-#define PGD_TABLE_SIZE	((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
-			 sizeof(struct subpage_prot_table))
-
-#define SBP_L1_BITS	(PAGE_SHIFT - 2)
-#define SBP_L2_BITS	(PAGE_SHIFT - 3)
-#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
-#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(pgd_t *pgd);
-
-static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
-{
-	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
-}
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
 #endif /* __ASSEMBLY__ */
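
A note on the __real_pte() hunk: on 64K-page kernels each Linux PTE has a
shadow word at (p) + PTRS_PER_PTE holding the per-4K-subpage hash slot
(hidx) bits. After this change that shadow word is only read when
_PAGE_COMBO is set, i.e. when the 64K page was actually hashed as sixteen
4K subpages; otherwise hidx is simply 0. A minimal stand-alone sketch of
that logic follows -- the struct layouts, the flag value, and the shrunken
PTRS_PER_PTE are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; the real definitions live in the powerpc headers. */
#define PTRS_PER_PTE	16		/* shrunk for the demo */
#define _PAGE_COMBO	0x1UL		/* hypothetical bit value */

typedef struct { unsigned long pte; } pte_t;
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;

static unsigned long pte_val(pte_t pte) { return pte.pte; }

/* Post-patch __real_pte(): read the shadow hidx word at p + PTRS_PER_PTE
 * only when _PAGE_COMBO marks the 64K page as hashed in 4K pieces. */
static real_pte_t real_pte(pte_t e, pte_t *p)
{
	real_pte_t r = { .pte = e, .hidx = 0 };

	if (pte_val(e) & _PAGE_COMBO)
		r.hidx = pte_val(*(p + PTRS_PER_PTE));
	return r;
}

int main(void)
{
	/* A PTE page plus its shadow half, as laid out on 64K kernels. */
	pte_t *page = calloc(2 * PTRS_PER_PTE, sizeof(pte_t));

	page[PTRS_PER_PTE].pte = 0xabcdUL;	/* pretend hidx word */

	pte_t plain = { .pte = 0 };
	pte_t combo = { .pte = _PAGE_COMBO };

	printf("plain hidx: %#lx\n", real_pte(plain, page).hidx);	/* 0 */
	printf("combo hidx: %#lx\n", real_pte(combo, page).hidx);	/* 0xabcd */
	free(page);
	return 0;
}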
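
The pte_iterate_hashed_subpages() hunk is part of the wider conversion from
full virtual addresses (va) to virtual page numbers (vpn = va >> VPN_SHIFT),
so the loop bounds and step scale down by VPN_SHIFT. A worked example,
assuming VPN_SHIFT is 12 and PAGE_SHIFT is 16 (64K base pages) -- both
values are assumptions for illustration:

#include <stdio.h>

/* Assumed values for the worked example; the kernel defines its own. */
#define PAGE_SHIFT	16		/* 64K base pages */
#define VPN_SHIFT	12		/* vpn = va >> VPN_SHIFT */

int main(void)
{
	unsigned long vpn = 0x12340;	/* arbitrary starting vpn */
	unsigned long end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));
	unsigned int shift = 12;	/* a 4K subpage size */
	int index;

	/* Mirrors the rewritten loop: 16 steps of 1 vpn each cover 64K. */
	for (index = 0; vpn < end; index++, vpn += (1UL << (shift - VPN_SHIFT)))
		;
	printf("subpages walked: %d\n", index);	/* prints 16 */
	return 0;
}

The old form stepped va by 1L << shift up to va + PAGE_SIZE; the arithmetic
is identical, just shifted down by VPN_SHIFT, so the walk still visits
exactly one position per 4K subpage of the 64K page.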
