Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	559
1 file changed, 380 insertions, 179 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8558b572e55..7e70ae968e5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
+#include <linux/export.h>
 #include <linux/of_fdt.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
@@ -20,6 +21,9 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
+#include <asm/hugetlb.h>
+
+#ifdef CONFIG_HUGETLB_PAGE
 
 #define PAGE_SHIFT_64K	16
 #define PAGE_SHIFT_16M	24
@@ -29,83 +33,79 @@ unsigned int HPAGE_SHIFT;
 
 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array. FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES	1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
 	unsigned int nr_gpages;
 };
 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES	1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
 #endif
 
-static inline int shift_to_mmu_psize(unsigned int shift)
-{
-	int psize;
+#define hugepd_none(hpd)	((hpd).pd == 0)
 
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
-		if (mmu_psize_defs[psize].shift == shift)
-			return psize;
-	return -1;
-}
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
 
-static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+/*
+ * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
+ */
+int pmd_huge(pmd_t pmd)
 {
-	if (mmu_psize_defs[mmu_psize].shift)
-		return mmu_psize_defs[mmu_psize].shift;
-	BUG();
+	/*
+	 * leaf pte for huge page, bottom two bits != 00
+	 */
+	return ((pmd_val(pmd) & 0x3) != 0x0);
 }
 
-#define hugepd_none(hpd)	((hpd).pd == 0)
-
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+int pud_huge(pud_t pud)
 {
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
-
-	if (shift)
-		*shift = 0;
+	/*
+	 * leaf pte for huge page, bottom two bits != 00
	 */
+	return ((pud_val(pud) & 0x3) != 0x0);
+}
 
-	pg = pgdir + pgd_index(ea);
-	if (is_hugepd(pg)) {
-		hpdp = (hugepd_t *)pg;
-	} else if (!pgd_none(*pg)) {
-		pdshift = PUD_SHIFT;
-		pu = pud_offset(pg, ea);
-		if (is_hugepd(pu))
-			hpdp = (hugepd_t *)pu;
-		else if (!pud_none(*pu)) {
-			pdshift = PMD_SHIFT;
-			pm = pmd_offset(pu, ea);
-			if (is_hugepd(pm))
-				hpdp = (hugepd_t *)pm;
-			else if (!pmd_none(*pm)) {
-				return pte_offset_kernel(pm, ea);
-			}
-		}
-	}
+int pgd_huge(pgd_t pgd)
+{
+	/*
+	 * leaf pte for huge page, bottom two bits != 00
+	 */
+	return ((pgd_val(pgd) & 0x3) != 0x0);
+}
+#else
+int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
 
-	if (!hpdp)
-		return NULL;
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
 
-	if (shift)
-		*shift = hugepd_shift(*hpdp);
-	return hugepte_offset(hpdp, ea, pdshift);
+int pgd_huge(pgd_t pgd)
+{
+	return 0;
 }
+#endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
+	/* Only called for hugetlbfs pages, hence can ignore THP */
 	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
 
@@ -115,12 +115,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	struct kmem_cache *cachep;
 	pte_t *new;
 
-#ifdef CONFIG_PPC64
-	cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
 	int num_hugepd = 1 << (pshift - pdshift);
 	cachep = hugepte_cache;
+#else
+	cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
 	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -132,12 +132,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		return -ENOMEM;
 
 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -148,6 +143,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		if (unlikely(!hugepd_none(*hpdp)))
 			break;
 		else
+			/* We use the old format for PPC_FSL_BOOK3E */
 			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 	}
 	/* If we bailed from the for loop early, an error occurred, clean up */
@@ -156,11 +152,89 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		hpdp->pd = 0;
 		kmem_cache_free(cachep, new);
 	}
+#else
+	if (!hugepd_none(*hpdp))
+		kmem_cache_free(cachep, new);
+	else {
+#ifdef CONFIG_PPC_BOOK3S_64
+		hpdp->pd = (unsigned long)new |
+			    (shift_to_mmu_psize(pshift) << 2);
+#else
+		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+#endif
+	}
 #endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
 
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+{
+	pgd_t *pg;
+	pud_t *pu;
+	pmd_t *pm;
+	hugepd_t *hpdp = NULL;
+	unsigned pshift = __ffs(sz);
+	unsigned pdshift = PGDIR_SHIFT;
+
+	addr &= ~(sz-1);
+	pg = pgd_offset(mm, addr);
+
+	if (pshift == PGDIR_SHIFT)
+		/* 16GB huge page */
+		return (pte_t *) pg;
+	else if (pshift > PUD_SHIFT)
+		/*
+		 * We need to use hugepd table
+		 */
+		hpdp = (hugepd_t *)pg;
+	else {
+		pdshift = PUD_SHIFT;
+		pu = pud_alloc(mm, pg, addr);
+		if (pshift == PUD_SHIFT)
+			return (pte_t *)pu;
+		else if (pshift > PMD_SHIFT)
+			hpdp = (hugepd_t *)pu;
+		else {
+			pdshift = PMD_SHIFT;
+			pm = pmd_alloc(mm, pu, addr);
+			if (pshift == PMD_SHIFT)
+				/* 16MB hugepage */
+				return (pte_t *)pm;
+			else
+				hpdp = (hugepd_t *)pm;
+		}
+	}
+	if (!hpdp)
+		return NULL;
+
+	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
+
+	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
+		return NULL;
+
+	return hugepte_offset(hpdp, addr, pdshift);
+}
+
+#else
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
@@ -173,12 +247,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 
 	addr &= ~(sz-1);
 	pg = pgd_offset(mm, addr);
-	if (pshift >= PUD_SHIFT) {
+
+	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
-		if (pshift >= PMD_SHIFT) {
+		if (pshift >= HUGEPD_PUD_SHIFT) {
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
@@ -197,8 +272,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 
 	return hugepte_offset(hpdp, addr, pdshift);
 }
+#endif
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
  */
@@ -225,7 +301,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 int alloc_bootmem_huge_page(struct hstate *hstate)
 {
 	struct huge_bootmem_page *m;
-	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
+	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
 	int nr_gpages = gpage_freearray[idx].nr_gpages;
 
 	if (nr_gpages == 0)
@@ -256,7 +332,8 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
 
 unsigned long gpage_npages[MMU_PAGE_COUNT];
 
-static int __init do_gpage_early_setup(char *param, char *val)
+static int __init do_gpage_early_setup(char *param, char *val,
+				       const char *unused)
 {
 	static phys_addr_t size;
 	unsigned long npages;
@@ -297,7 +374,8 @@ void __init reserve_hugetlb_gpages(void)
 	int i;
 
 	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
-	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);
+	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
+			&do_gpage_early_setup);
 
 	/*
 	 * Walk gpage list in reverse, allocating larger page sizes first.
@@ -318,7 +396,7 @@ void __init reserve_hugetlb_gpages(void)
 	}
 }
 
-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */
 
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
@@ -356,7 +434,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 
@@ -384,12 +462,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = &__get_cpu_var(hugepd_freelist_cur);
+	batchp = &get_cpu_var(hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),
 			  cpumask_of(smp_processor_id()))) {
 		kmem_cache_free(hugepte_cache, hugepte);
+		put_cpu_var(hugepd_freelist_cur);
 		return;
 	}
 
@@ -403,6 +482,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
 		*batchp = NULL;
 	}
+	put_cpu_var(hugepd_freelist_cur);
 }
 #endif
 
@@ -416,11 +496,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;
 
-#ifdef CONFIG_PPC64
-	unsigned int shift = hugepd_shift(*hpdp);
-#else
-	/* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* Note: On fsl the hpdp may be the first of several */
 	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+	unsigned int shift = hugepd_shift(*hpdp);
 #endif
 
 	start &= pdmask;
@@ -438,10 +518,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	hpdp->pd = 0;
 
 	tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
+#else
+	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 #endif
 }
 
@@ -454,14 +535,29 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	unsigned long start;
 
 	start = addr;
-	pmd = pmd_offset(pud, addr);
 	do {
+		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd))
+		if (!is_hugepd(pmd)) {
+			/*
+			 * if it is not hugepd pointer, we should already find
+			 * it cleared.
+			 */
+			WARN_ON(!pmd_none_or_clear_bad(pmd));
 			continue;
+		}
+#ifdef CONFIG_PPC_FSL_BOOK3E
+		/*
+		 * Increment next by the size of the huge mapping since
+		 * there may be more than one entry at this level for a
+		 * single hugepage, but all of them point to
+		 * the same kmem cache that holds the hugepte.
+		 */
+		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
-	} while (pmd++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PUD_MASK;
 	if (start < floor)
@@ -488,8 +584,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
 	do {
+		pud = pud_offset(pgd, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(pud)) {
 			if (pud_none_or_clear_bad(pud))
@@ -497,10 +593,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+			/*
+			 * Increment next by the size of the huge mapping since
+			 * there may be more than one entry at this level for a
+			 * single hugepage, but all of them point to
+			 * the same kmem cache that holds the hugepte.
+			 */
+			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
-	} while (pud++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PGDIR_MASK;
 	if (start < floor)
@@ -520,8 +625,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
 /*
  * This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
 */
 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long addr, unsigned long end,
@@ -555,12 +658,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 			/*
 			 * Increment next by the size of the huge mapping since
-			 * on 32-bit there may be more than one entry at the pgd
-			 * level for a single hugepage, but all of them point to
-			 * the same kmem cache that holds the hugepte.
+			 * there may be more than one entry at the pgd level
+			 * for a single hugepage, but all of them point to the
+			 * same kmem cache that holds the hugepte.
 			 */
 			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 #endif
@@ -577,11 +680,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	struct page *page;
 	unsigned shift;
 	unsigned long mask;
-
+	/*
+	 * Transparent hugepages are handled by generic code. We can skip them
+	 * here.
+	 */
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift)
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
 		return ERR_PTR(-EINVAL);
 
 	mask = (1UL << shift) - 1;
@@ -592,16 +698,6 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	return page;
 }
 
-int pmd_huge(pmd_t pmd)
-{
-	return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-	return 0;
-}
-
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)
@@ -610,69 +706,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-		       unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long mask;
-	unsigned long pte_end;
-	struct page *head, *page, *tail;
-	pte_t pte;
-	int refs;
-
-	pte_end = (addr + sz) & ~(sz-1);
-	if (pte_end < end)
-		end = pte_end;
-
-	pte = *ptep;
-	mask = _PAGE_PRESENT | _PAGE_USER;
-	if (write)
-		mask |= _PAGE_RW;
-
-	if ((pte_val(pte) & mask) != mask)
-		return 0;
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
-	refs = 0;
-	head = pte_page(pte);
-
-	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
-	tail = page;
-	do {
-		VM_BUG_ON(compound_head(page) != head);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-
-	if (!page_cache_add_speculative(head, refs)) {
-		*nr -= refs;
-		return 0;
-	}
-
-	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		/* Could be optimized better */
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
-		return 0;
-	}
-
-	/*
-	 * Any tail page need their mapcount reference taken before we
-	 * return.
-	 */
-	while (refs--) {
-		if (PageTail(tail))
-			get_huge_page_tail(tail);
-		tail++;
-	}
-
-	return 1;
-}
-
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)
 {
@@ -698,19 +731,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
 	return 1;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-#ifdef CONFIG_PPC_MM_SLICES
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 
-	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
-#else
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-#endif
+	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
 }
+#endif
 
 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
@@ -784,7 +815,7 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
@@ -849,11 +880,16 @@ static int __init hugetlbpage_init(void)
 			pdshift = PUD_SHIFT;
 		else
 			pdshift = PGDIR_SHIFT;
-
-		pgtable_cache_add(pdshift - shift, NULL);
-		if (!PGT_CACHE(pdshift - shift))
-			panic("hugetlbpage_init(): could not create "
-			      "pgtable cache for %d bit pagesize\n", shift);
+		/*
+		 * if we have pdshift and shift value same, we don't
+		 * use pgt cache for hugepd.
+		 */
+		if (pdshift != shift) {
+			pgtable_cache_add(pdshift - shift, NULL);
+			if (!PGT_CACHE(pdshift - shift))
+				panic("hugetlbpage_init(): could not create "
+				      "pgtable cache for %d bit pagesize\n", shift);
+		}
 	}
 
 	/* Set default large page size. Currently, we pick 16M or 1M
@@ -880,9 +916,174 @@ void flush_dcache_icache_hugepage(struct page *page)
 		if (!PageHighMem(page)) {
 			__flush_dcache_icache(page_address(page+i));
 		} else {
-			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+			start = kmap_atomic(page+i);
 			__flush_dcache_icache(start);
-			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+			kunmap_atomic(start);
 		}
 	}
 }
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page, bottom two bits != 00
+ * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ *
+ * So long as we atomically load page table pointers we are safe against teardown,
+ * we can follow the address down to the the page and take a ref on it.
+ */
+
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+{
+	pgd_t pgd, *pgdp;
+	pud_t pud, *pudp;
+	pmd_t pmd, *pmdp;
+	pte_t *ret_pte;
+	hugepd_t *hpdp = NULL;
+	unsigned pdshift = PGDIR_SHIFT;
+
+	if (shift)
+		*shift = 0;
+
+	pgdp = pgdir + pgd_index(ea);
+	pgd  = ACCESS_ONCE(*pgdp);
+	/*
+	 * Always operate on the local stack value. This make sure the
+	 * value don't get updated by a parallel THP split/collapse,
+	 * page fault or a page unmap. The return pte_t * is still not
+	 * stable. So should be checked there for above conditions.
+	 */
+	if (pgd_none(pgd))
+		return NULL;
+	else if (pgd_huge(pgd)) {
+		ret_pte = (pte_t *) pgdp;
+		goto out;
+	} else if (is_hugepd(&pgd))
+		hpdp = (hugepd_t *)&pgd;
+	else {
+		/*
+		 * Even if we end up with an unmap, the pgtable will not
+		 * be freed, because we do an rcu free and here we are
+		 * irq disabled
+		 */
+		pdshift = PUD_SHIFT;
+		pudp = pud_offset(&pgd, ea);
+		pud  = ACCESS_ONCE(*pudp);
+
+		if (pud_none(pud))
+			return NULL;
+		else if (pud_huge(pud)) {
+			ret_pte = (pte_t *) pudp;
+			goto out;
+		} else if (is_hugepd(&pud))
+			hpdp = (hugepd_t *)&pud;
+		else {
+			pdshift = PMD_SHIFT;
+			pmdp = pmd_offset(&pud, ea);
+			pmd  = ACCESS_ONCE(*pmdp);
+			/*
+			 * A hugepage collapse is captured by pmd_none, because
+			 * it mark the pmd none and do a hpte invalidate.
+			 *
+			 * A hugepage split is captured by pmd_trans_splitting
+			 * because we mark the pmd trans splitting and do a
+			 * hpte invalidate
+			 *
+			 */
+			if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+				return NULL;
+
+			if (pmd_huge(pmd) || pmd_large(pmd)) {
+				ret_pte = (pte_t *) pmdp;
+				goto out;
+			} else if (is_hugepd(&pmd))
+				hpdp = (hugepd_t *)&pmd;
+			else
+				return pte_offset_kernel(&pmd, ea);
+		}
+	}
+	if (!hpdp)
+		return NULL;
+
+	ret_pte = hugepte_offset(hpdp, ea, pdshift);
+	pdshift = hugepd_shift(*hpdp);
+out:
+	if (shift)
+		*shift = pdshift;
+	return ret_pte;
+}
+EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
+
+int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	unsigned long pte_end;
+	struct page *head, *page, *tail;
+	pte_t pte;
+	int refs;
+
+	pte_end = (addr + sz) & ~(sz-1);
+	if (pte_end < end)
+		end = pte_end;
+
+	pte = ACCESS_ONCE(*ptep);
+	mask = _PAGE_PRESENT | _PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+
+	if ((pte_val(pte) & mask) != mask)
+		return 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/*
+	 * check for splitting here
+	 */
+	if (pmd_trans_splitting(pte_pmd(pte)))
+		return 0;
+#endif
+
+	/* hugepages are never "special" */
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+
+	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+		/* Could be optimized better */
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/*
+	 * Any tail page need their mapcount reference taken before we
+	 * return.
	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}