author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2005-11-07 11:06:55 +1100
committer  Linus Torvalds <torvalds@g5.osdl.org>               2005-11-06 16:56:47 -0800
commit     3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree       f67c381e8f57959aa4a94bda4c68e24253cd8171   /include/asm-ppc64/pgalloc.h
parent     f912696ab330bf539231d1f8032320f2a08b850f (diff)
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel
base page size to 64K. The resulting kernel still boots on any
hardware. On current machines that support only 4K pages, the kernel
will transparently maintain 16 "subpages" for each 64K page.
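For reference, the "16 subpages" figure is simply the ratio of the two page
sizes (64K / 4K). The sketch below is plain userspace C for illustration
only; the constant and function names are invented here and are not the
kernel's:

/*
 * Illustrative only: with a 64K base page and 4K hardware pages, each
 * base page covers 64K / 4K = 16 hardware "subpages".
 */
#include <stdio.h>

#define BASE_PAGE_SHIFT		16	/* 64K software page */
#define HW_PAGE_SHIFT		12	/* 4K hardware page  */
#define SUBPAGES_PER_PAGE	(1 << (BASE_PAGE_SHIFT - HW_PAGE_SHIFT))

/* Which 4K subpage of its 64K base page does an address fall in? */
static unsigned int subpage_index(unsigned long addr)
{
	return (addr >> HW_PAGE_SHIFT) & (SUBPAGES_PER_PAGE - 1);
}

int main(void)
{
	printf("subpages per 64K page: %d\n", SUBPAGES_PER_PAGE);	/* 16 */
	printf("subpage of 0x1a000:    %u\n", subpage_index(0x1a000));	/* 10 */
	return 0;
}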
Note that while real 64K-capable HW has been tested, the current patch
will not enable it yet, as such hardware has not been released, and I'm
still verifying with the firmware architects the proper way to get the
information from the newer hypervisors.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ppc64/pgalloc.h')
-rw-r--r--   include/asm-ppc64/pgalloc.h   47
1 file changed, 39 insertions, 8 deletions
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 26bc49c1108..98da0e4262b 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -8,10 +8,16 @@
 
 extern kmem_cache_t *pgtable_cache[];
 
+#ifdef CONFIG_PPC_64K_PAGES
+#define PTE_CACHE_NUM	0
+#define PMD_CACHE_NUM	0
+#define PGD_CACHE_NUM	1
+#else
 #define PTE_CACHE_NUM	0
 #define PMD_CACHE_NUM	1
 #define PUD_CACHE_NUM	1
 #define PGD_CACHE_NUM	0
+#endif
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -30,6 +36,8 @@ static inline void pgd_free(pgd_t *pgd)
 	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
 }
 
+#ifndef CONFIG_PPC_64K_PAGES
+
 #define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -43,7 +51,30 @@ static inline void pud_free(pud_t *pud)
 	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
-#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_set(pud, (unsigned long)pmd);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+	pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, (unsigned long)(pte))
+
+
+#else /* CONFIG_PPC_64K_PAGES */
+
+#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	pmd_set(pmd, (unsigned long)pte);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+	pmd_populate_kernel(mm, pmd, page_address(pte_page))
+
+#endif /* CONFIG_PPC_64K_PAGES */
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -56,17 +87,15 @@ static inline void pmd_free(pmd_t *pmd)
 	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_populate(mm, pmd, pte_page) \
-	pmd_populate_kernel(mm, pmd, page_address(pte_page))
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
 	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
 	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
@@ -103,7 +132,7 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
@@ -111,9 +140,11 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#ifndef CONFIG_PPC_64K_PAGES
 #define __pud_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
 		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#endif /* CONFIG_PPC_64K_PAGES */
 
 #define check_pgt_cache()	do { } while (0)
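The __pte/__pmd/__pud_free_tlb macros above hand page tables back through a
pgtable_free_t cookie built by pgtable_free_cache(). Its definition is not
part of this hunk, so the userspace sketch below is an assumption based on
the visible callers (a TABLE_SIZE-1 mask plus a small cache index) and on
the tail of pgtable_free() shown in the context lines: because the tables
are allocated with table-size alignment, the low bits of the pointer are
free to carry the index of the kmem cache to free into. Under
CONFIG_PPC_64K_PAGES the same scheme works unchanged; only the values
behind PTE_CACHE_NUM/PMD_CACHE_NUM/PGD_CACHE_NUM differ, as set up at the
top of the file.

/*
 * Userspace sketch, illustrative only.  The struct and function names
 * mirror the header, but the bodies here are an assumption; the real
 * definitions live outside this hunk.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned long val; } pgtable_free_t;

#define PGF_CACHENUM_MASK	0xfUL

static pgtable_free_t pgtable_free_cache(void *p, int cachenum,
					 unsigned long mask)
{
	/* mask is TABLE_SIZE-1: clear the alignment bits of the pointer,
	 * then stash the cache number in them. */
	return (pgtable_free_t){ .val = ((unsigned long)p & ~mask) | cachenum };
}

static void pgtable_free(pgtable_free_t pgf)
{
	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	int cachenum = pgf.val & PGF_CACHENUM_MASK;

	/* the kernel would do kmem_cache_free(pgtable_cache[cachenum], p) */
	printf("free %p from cache %d\n", p, cachenum);
}

int main(void)
{
	/* stand-in for a PMD table: 4096-byte aligned allocation */
	void *table = aligned_alloc(4096, 4096);

	if (!table)
		return 1;

	pgtable_free(pgtable_free_cache(table, 1 /* PMD_CACHE_NUM */, 4096 - 1));
	free(table);
	return 0;
}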