Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 1492 |
1 file changed, 907 insertions, 585 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 426c269e552..7e70ae968e5 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -1,312 +1,676 @@ /* - * PPC64 (POWER4) Huge TLB Page Support for Kernel. + * PPC Huge TLB Page Support for Kernel. * * Copyright (C) 2003 David Gibson, IBM Corporation. + * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor * * Based on the IA-32 version: * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> */ -#include <linux/init.h> -#include <linux/fs.h> #include <linux/mm.h> -#include <linux/hugetlb.h> -#include <linux/pagemap.h> -#include <linux/smp_lock.h> +#include <linux/io.h> #include <linux/slab.h> -#include <linux/err.h> -#include <linux/sysctl.h> -#include <asm/mman.h> +#include <linux/hugetlb.h> +#include <linux/export.h> +#include <linux/of_fdt.h> +#include <linux/memblock.h> +#include <linux/bootmem.h> +#include <linux/moduleparam.h> +#include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> -#include <asm/tlbflush.h> -#include <asm/mmu_context.h> -#include <asm/machdep.h> -#include <asm/cputable.h> -#include <asm/tlb.h> +#include <asm/setup.h> +#include <asm/hugetlb.h> + +#ifdef CONFIG_HUGETLB_PAGE + +#define PAGE_SHIFT_64K 16 +#define PAGE_SHIFT_16M 24 +#define PAGE_SHIFT_16G 34 -#include <linux/sysctl.h> +unsigned int HPAGE_SHIFT; + +/* + * Tracks gpages after the device tree is scanned and before the + * huge_boot_pages list is ready. On non-Freescale implementations, this is + * just used to track 16G pages and so is a single array. FSL-based + * implementations may have more than one gpage size, so we need multiple + * arrays + */ +#ifdef CONFIG_PPC_FSL_BOOK3E +#define MAX_NUMBER_GPAGES 128 +struct psize_gpages { + u64 gpage_list[MAX_NUMBER_GPAGES]; + unsigned int nr_gpages; +}; +static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT]; +#else +#define MAX_NUMBER_GPAGES 1024 +static u64 gpage_freearray[MAX_NUMBER_GPAGES]; +static unsigned nr_gpages; +#endif + +#define hugepd_none(hpd) ((hpd).pd == 0) + +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * At this point we do the placement change only for BOOK3S 64. This would + * possibly work on other subarchs. 
+ */ -#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) -#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) +/* + * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have + * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; + */ +int pmd_huge(pmd_t pmd) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + return ((pmd_val(pmd) & 0x3) != 0x0); +} + +int pud_huge(pud_t pud) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + return ((pud_val(pud) & 0x3) != 0x0); +} + +int pgd_huge(pgd_t pgd) +{ + /* + * leaf pte for huge page, bottom two bits != 00 + */ + return ((pgd_val(pgd) & 0x3) != 0x0); +} +#else +int pmd_huge(pmd_t pmd) +{ + return 0; +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +int pgd_huge(pgd_t pgd) +{ + return 0; +} +#endif -/* Modelled after find_linux_pte() */ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { + /* Only called for hugetlbfs pages, hence can ignore THP */ + return find_linux_pte_or_hugepte(mm->pgd, addr, NULL); +} + +static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, + unsigned long address, unsigned pdshift, unsigned pshift) +{ + struct kmem_cache *cachep; + pte_t *new; + +#ifdef CONFIG_PPC_FSL_BOOK3E + int i; + int num_hugepd = 1 << (pshift - pdshift); + cachep = hugepte_cache; +#else + cachep = PGT_CACHE(pdshift - pshift); +#endif + + new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); + + BUG_ON(pshift > HUGEPD_SHIFT_MASK); + BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); + + if (! new) + return -ENOMEM; + + spin_lock(&mm->page_table_lock); +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * We have multiple higher-level entries that point to the same + * actual pte location. Fill in each as we go and backtrack on error. + * We need all of these so the DTLB pgtable walk code can find the + * right higher-level entry without knowing if it's a hugepage or not. + */ + for (i = 0; i < num_hugepd; i++, hpdp++) { + if (unlikely(!hugepd_none(*hpdp))) + break; + else + /* We use the old format for PPC_FSL_BOOK3E */ + hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; + } + /* If we bailed from the for loop early, an error occurred, clean up */ + if (i < num_hugepd) { + for (i = i - 1 ; i >= 0; i--, hpdp--) + hpdp->pd = 0; + kmem_cache_free(cachep, new); + } +#else + if (!hugepd_none(*hpdp)) + kmem_cache_free(cachep, new); + else { +#ifdef CONFIG_PPC_BOOK3S_64 + hpdp->pd = (unsigned long)new | + (shift_to_mmu_psize(pshift) << 2); +#else + hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; +#endif + } +#endif + spin_unlock(&mm->page_table_lock); + return 0; +} + +/* + * These macros define how to determine which level of the page table holds + * the hpdp. + */ +#ifdef CONFIG_PPC_FSL_BOOK3E +#define HUGEPD_PGD_SHIFT PGDIR_SHIFT +#define HUGEPD_PUD_SHIFT PUD_SHIFT +#else +#define HUGEPD_PGD_SHIFT PUD_SHIFT +#define HUGEPD_PUD_SHIFT PMD_SHIFT +#endif + +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * At this point we do the placement change only for BOOK3S 64. This would + * possibly work on other subarchs. + */ +pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +{ pgd_t *pg; pud_t *pu; pmd_t *pm; - pte_t *pt; - - BUG_ON(! 
in_hugepage_area(mm->context, addr)); - - addr &= HPAGE_MASK; + hugepd_t *hpdp = NULL; + unsigned pshift = __ffs(sz); + unsigned pdshift = PGDIR_SHIFT; + addr &= ~(sz-1); pg = pgd_offset(mm, addr); - if (!pgd_none(*pg)) { - pu = pud_offset(pg, addr); - if (!pud_none(*pu)) { - pm = pmd_offset(pu, addr); -#ifdef CONFIG_PPC_64K_PAGES - /* Currently, we use the normal PTE offset within full - * size PTE pages, thus our huge PTEs are scattered in - * the PTE page and we do waste some. We may change - * that in the future, but the current mecanism keeps - * things much simpler - */ - if (!pmd_none(*pm)) { - /* Note: pte_offset_* are all equivalent on - * ppc64 as we don't have HIGHMEM - */ - pt = pte_offset_kernel(pm, addr); - return pt; - } -#else /* CONFIG_PPC_64K_PAGES */ - /* On 4k pages, we put huge PTEs in the PMD page */ - pt = (pte_t *)pm; - return pt; -#endif /* CONFIG_PPC_64K_PAGES */ + + if (pshift == PGDIR_SHIFT) + /* 16GB huge page */ + return (pte_t *) pg; + else if (pshift > PUD_SHIFT) + /* + * We need to use hugepd table + */ + hpdp = (hugepd_t *)pg; + else { + pdshift = PUD_SHIFT; + pu = pud_alloc(mm, pg, addr); + if (pshift == PUD_SHIFT) + return (pte_t *)pu; + else if (pshift > PMD_SHIFT) + hpdp = (hugepd_t *)pu; + else { + pdshift = PMD_SHIFT; + pm = pmd_alloc(mm, pu, addr); + if (pshift == PMD_SHIFT) + /* 16MB hugepage */ + return (pte_t *)pm; + else + hpdp = (hugepd_t *)pm; } } + if (!hpdp) + return NULL; - return NULL; + BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); + + if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) + return NULL; + + return hugepte_offset(hpdp, addr, pdshift); } -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +#else + +pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pg; pud_t *pu; pmd_t *pm; - pte_t *pt; - - BUG_ON(! in_hugepage_area(mm->context, addr)); + hugepd_t *hpdp = NULL; + unsigned pshift = __ffs(sz); + unsigned pdshift = PGDIR_SHIFT; - addr &= HPAGE_MASK; + addr &= ~(sz-1); pg = pgd_offset(mm, addr); - pu = pud_alloc(mm, pg, addr); - - if (pu) { - pm = pmd_alloc(mm, pu, addr); - if (pm) { -#ifdef CONFIG_PPC_64K_PAGES - /* See comment in huge_pte_offset. Note that if we ever - * want to put the page size in the PMD, we would have - * to open code our own pte_alloc* function in order - * to populate and set the size atomically - */ - pt = pte_alloc_map(mm, pm, addr); -#else /* CONFIG_PPC_64K_PAGES */ - pt = (pte_t *)pm; -#endif /* CONFIG_PPC_64K_PAGES */ - return pt; + + if (pshift >= HUGEPD_PGD_SHIFT) { + hpdp = (hugepd_t *)pg; + } else { + pdshift = PUD_SHIFT; + pu = pud_alloc(mm, pg, addr); + if (pshift >= HUGEPD_PUD_SHIFT) { + hpdp = (hugepd_t *)pu; + } else { + pdshift = PMD_SHIFT; + pm = pmd_alloc(mm, pu, addr); + hpdp = (hugepd_t *)pm; } } - return NULL; + if (!hpdp) + return NULL; + + BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); + + if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) + return NULL; + + return hugepte_offset(hpdp, addr, pdshift); } +#endif -void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +#ifdef CONFIG_PPC_FSL_BOOK3E +/* Build list of addresses of gigantic pages. This function is used in early + * boot before the buddy or bootmem allocator is setup. 
+ */ +void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) { - if (pte_present(*ptep)) { - /* We open-code pte_clear because we need to pass the right - * argument to hpte_update (huge / !huge) - */ - unsigned long old = pte_update(ptep, ~0UL); - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1); - flush_tlb_pending(); + unsigned int idx = shift_to_mmu_psize(__ffs(page_size)); + int i; + + if (addr == 0) + return; + + gpage_freearray[idx].nr_gpages = number_of_pages; + + for (i = 0; i < number_of_pages; i++) { + gpage_freearray[idx].gpage_list[i] = addr; + addr += page_size; } - *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) +/* + * Moves the gigantic page addresses from the temporary list to the + * huge_boot_pages list. + */ +int alloc_bootmem_huge_page(struct hstate *hstate) { - unsigned long old = pte_update(ptep, ~0UL); + struct huge_bootmem_page *m; + int idx = shift_to_mmu_psize(huge_page_shift(hstate)); + int nr_gpages = gpage_freearray[idx].nr_gpages; - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1); - *ptep = __pte(0); + if (nr_gpages == 0) + return 0; - return __pte(old); +#ifdef CONFIG_HIGHMEM + /* + * If gpages can be in highmem we can't use the trick of storing the + * data structure in the page; allocate space for this + */ + m = alloc_bootmem(sizeof(struct huge_bootmem_page)); + m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; +#else + m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); +#endif + + list_add(&m->list, &huge_boot_pages); + gpage_freearray[idx].nr_gpages = nr_gpages; + gpage_freearray[idx].gpage_list[nr_gpages] = 0; + m->hstate = hstate; + + return 1; } - /* - * This function checks for proper alignment of input addr and len parameters. + * Scan the command line hugepagesz= options for gigantic pages; store those in + * a list that we use to allocate the memory once all options are parsed. */ -int is_aligned_hugepage_range(unsigned long addr, unsigned long len) + +unsigned long gpage_npages[MMU_PAGE_COUNT]; + +static int __init do_gpage_early_setup(char *param, char *val, + const char *unused) { - if (len & ~HPAGE_MASK) - return -EINVAL; - if (addr & ~HPAGE_MASK) - return -EINVAL; - if (! (within_hugepage_low_range(addr, len) - || within_hugepage_high_range(addr, len)) ) - return -EINVAL; + static phys_addr_t size; + unsigned long npages; + + /* + * The hugepagesz and hugepages cmdline options are interleaved. We + * use the size variable to keep track of whether or not this was done + * properly and skip over instances where it is incorrect. Other + * command-line parsing code will issue warnings, so we don't need to. + * + */ + if ((strcmp(param, "default_hugepagesz") == 0) || + (strcmp(param, "hugepagesz") == 0)) { + size = memparse(val, NULL); + } else if (strcmp(param, "hugepages") == 0) { + if (size != 0) { + if (sscanf(val, "%lu", &npages) <= 0) + npages = 0; + gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; + size = 0; + } + } return 0; } -static void flush_low_segments(void *parm) -{ - u16 areas = (unsigned long) parm; - unsigned long i; - asm volatile("isync" : : : "memory"); +/* + * This function allocates physical space for pages that are larger than the + * buddy allocator can handle. We want to allocate these in highmem because + * the amount of lowmem is limited. 
This means that this function MUST be + * called before lowmem_end_addr is set up in MMU_init() in order for the lmb + * allocate to grab highmem. + */ +void __init reserve_hugetlb_gpages(void) +{ + static __initdata char cmdline[COMMAND_LINE_SIZE]; + phys_addr_t size, base; + int i; - BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS); + strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); + parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0, + &do_gpage_early_setup); - for (i = 0; i < NUM_LOW_AREAS; i++) { - if (! (areas & (1U << i))) + /* + * Walk gpage list in reverse, allocating larger page sizes first. + * Skip over unsupported sizes, or sizes that have 0 gpages allocated. + * When we reach the point in the list where pages are no longer + * considered gpages, we're done. + */ + for (i = MMU_PAGE_COUNT-1; i >= 0; i--) { + if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0) continue; - asm volatile("slbie %0" - : : "r" ((i << SID_SHIFT) | SLBIE_C)); - } + else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT)) + break; - asm volatile("isync" : : : "memory"); + size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i)); + base = memblock_alloc_base(size * gpage_npages[i], size, + MEMBLOCK_ALLOC_ANYWHERE); + add_gpage(base, size, gpage_npages[i]); + } } -static void flush_high_segments(void *parm) -{ - u16 areas = (unsigned long) parm; - unsigned long i, j; - - asm volatile("isync" : : : "memory"); +#else /* !PPC_FSL_BOOK3E */ - BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS); - - for (i = 0; i < NUM_HIGH_AREAS; i++) { - if (! (areas & (1U << i))) - continue; - for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++) - asm volatile("slbie %0" - :: "r" (((i << HTLB_AREA_SHIFT) - + (j << SID_SHIFT)) | SLBIE_C)); +/* Build list of addresses of gigantic pages. This function is used in early + * boot before the buddy or bootmem allocator is setup. + */ +void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) +{ + if (!addr) + return; + while (number_of_pages > 0) { + gpage_freearray[nr_gpages] = addr; + nr_gpages++; + number_of_pages--; + addr += page_size; } - - asm volatile("isync" : : : "memory"); } -static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area) +/* Moves the gigantic page addresses from the temporary list to the + * huge_boot_pages list. 
+ */ +int alloc_bootmem_huge_page(struct hstate *hstate) { - unsigned long start = area << SID_SHIFT; - unsigned long end = (area+1) << SID_SHIFT; - struct vm_area_struct *vma; - - BUG_ON(area >= NUM_LOW_AREAS); - - /* Check no VMAs are in the region */ - vma = find_vma(mm, start); - if (vma && (vma->vm_start < end)) - return -EBUSY; + struct huge_bootmem_page *m; + if (nr_gpages == 0) + return 0; + m = phys_to_virt(gpage_freearray[--nr_gpages]); + gpage_freearray[nr_gpages] = 0; + list_add(&m->list, &huge_boot_pages); + m->hstate = hstate; + return 1; +} +#endif +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ return 0; } -static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area) -{ - unsigned long start = area << HTLB_AREA_SHIFT; - unsigned long end = (area+1) << HTLB_AREA_SHIFT; - struct vm_area_struct *vma; +#ifdef CONFIG_PPC_FSL_BOOK3E +#define HUGEPD_FREELIST_SIZE \ + ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) - BUG_ON(area >= NUM_HIGH_AREAS); +struct hugepd_freelist { + struct rcu_head rcu; + unsigned int index; + void *ptes[0]; +}; - /* Hack, so that each addresses is controlled by exactly one - * of the high or low area bitmaps, the first high area starts - * at 4GB, not 0 */ - if (start == 0) - start = 0x100000000UL; +static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur); - /* Check no VMAs are in the region */ - vma = find_vma(mm, start); - if (vma && (vma->vm_start < end)) - return -EBUSY; +static void hugepd_free_rcu_callback(struct rcu_head *head) +{ + struct hugepd_freelist *batch = + container_of(head, struct hugepd_freelist, rcu); + unsigned int i; - return 0; + for (i = 0; i < batch->index; i++) + kmem_cache_free(hugepte_cache, batch->ptes[i]); + + free_page((unsigned long)batch); } -static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas) +static void hugepd_free(struct mmu_gather *tlb, void *hugepte) { - unsigned long i; + struct hugepd_freelist **batchp; - BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS); - BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS); + batchp = &get_cpu_var(hugepd_freelist_cur); - newareas &= ~(mm->context.low_htlb_areas); - if (! 
newareas) - return 0; /* The segments we want are already open */ + if (atomic_read(&tlb->mm->mm_users) < 2 || + cpumask_equal(mm_cpumask(tlb->mm), + cpumask_of(smp_processor_id()))) { + kmem_cache_free(hugepte_cache, hugepte); + put_cpu_var(hugepd_freelist_cur); + return; + } - for (i = 0; i < NUM_LOW_AREAS; i++) - if ((1 << i) & newareas) - if (prepare_low_area_for_htlb(mm, i) != 0) - return -EBUSY; + if (*batchp == NULL) { + *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC); + (*batchp)->index = 0; + } - mm->context.low_htlb_areas |= newareas; + (*batchp)->ptes[(*batchp)->index++] = hugepte; + if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { + call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback); + *batchp = NULL; + } + put_cpu_var(hugepd_freelist_cur); +} +#endif - /* update the paca copy of the context struct */ - get_paca()->context = mm->context; +static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, + unsigned long start, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pte_t *hugepte = hugepd_page(*hpdp); + int i; + + unsigned long pdmask = ~((1UL << pdshift) - 1); + unsigned int num_hugepd = 1; + +#ifdef CONFIG_PPC_FSL_BOOK3E + /* Note: On fsl the hpdp may be the first of several */ + num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); +#else + unsigned int shift = hugepd_shift(*hpdp); +#endif + + start &= pdmask; + if (start < floor) + return; + if (ceiling) { + ceiling &= pdmask; + if (! ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; - /* the context change must make it to memory before the flush, - * so that further SLB misses do the right thing. */ - mb(); - on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1); + for (i = 0; i < num_hugepd; i++, hpdp++) + hpdp->pd = 0; - return 0; + tlb->need_flush = 1; + +#ifdef CONFIG_PPC_FSL_BOOK3E + hugepd_free(tlb, hugepte); +#else + pgtable_free_tlb(tlb, hugepte, pdshift - shift); +#endif } -static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) +static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) { - unsigned long i; - - BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS); - BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8) - != NUM_HIGH_AREAS); - - newareas &= ~(mm->context.high_htlb_areas); - if (! newareas) - return 0; /* The areas we want are already open */ + pmd_t *pmd; + unsigned long next; + unsigned long start; - for (i = 0; i < NUM_HIGH_AREAS; i++) - if ((1 << i) & newareas) - if (prepare_high_area_for_htlb(mm, i) != 0) - return -EBUSY; + start = addr; + do { + pmd = pmd_offset(pud, addr); + next = pmd_addr_end(addr, end); + if (!is_hugepd(pmd)) { + /* + * if it is not hugepd pointer, we should already find + * it cleared. + */ + WARN_ON(!pmd_none_or_clear_bad(pmd)); + continue; + } +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * Increment next by the size of the huge mapping since + * there may be more than one entry at this level for a + * single hugepage, but all of them point to + * the same kmem cache that holds the hugepte. 
+ */ + next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); +#endif + free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, + addr, next, floor, ceiling); + } while (addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; - mm->context.high_htlb_areas |= newareas; + pmd = pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); +} - /* update the paca copy of the context struct */ - get_paca()->context = mm->context; +static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long next; + unsigned long start; - /* the context change must make it to memory before the flush, - * so that further SLB misses do the right thing. */ - mb(); - on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1); + start = addr; + do { + pud = pud_offset(pgd, addr); + next = pud_addr_end(addr, end); + if (!is_hugepd(pud)) { + if (pud_none_or_clear_bad(pud)) + continue; + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); + } else { +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * Increment next by the size of the huge mapping since + * there may be more than one entry at this level for a + * single hugepage, but all of them point to + * the same kmem cache that holds the hugepte. + */ + next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); +#endif + free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, + addr, next, floor, ceiling); + } + } while (addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; - return 0; + pud = pud_offset(pgd, start); + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); } -int prepare_hugepage_range(unsigned long addr, unsigned long len) +/* + * This function frees user-level page tables of a process. + */ +void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) { - int err; - - if ( (addr+len) < addr ) - return -EINVAL; + pgd_t *pgd; + unsigned long next; - if ((addr + len) < 0x100000000UL) - err = open_low_hpage_areas(current->mm, - LOW_ESID_MASK(addr, len)); - else - err = open_high_hpage_areas(current->mm, - HTLB_AREA_MASK(addr, len)); - if (err) { - printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" - " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n", - addr, len, - LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len)); - return err; - } + /* + * Because there are a number of different possible pagetable + * layouts for hugepage ranges, we limit knowledge of how + * things should be laid out to the allocation path + * (huge_pte_alloc(), above). Everything else works out the + * structure as it goes from information in the hugepd + * pointers. That means that we can't here use the + * optimization used in the normal page free_pgd_range(), of + * checking whether we're actually covering a large enough + * range to have to do anything at the top level of the walk + * instead of at the bottom. + * + * To make sense of this, you should probably go read the big + * block comment at the top of the normal free_pgd_range(), + * too. 
+ */ - return 0; + do { + next = pgd_addr_end(addr, end); + pgd = pgd_offset(tlb->mm, addr); + if (!is_hugepd(pgd)) { + if (pgd_none_or_clear_bad(pgd)) + continue; + hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); + } else { +#ifdef CONFIG_PPC_FSL_BOOK3E + /* + * Increment next by the size of the huge mapping since + * there may be more than one entry at the pgd level + * for a single hugepage, but all of them point to the + * same kmem cache that holds the hugepte. + */ + next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); +#endif + free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, + addr, next, floor, ceiling); + } + } while (addr = next, addr != end); } struct page * @@ -314,23 +678,26 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { pte_t *ptep; struct page *page; + unsigned shift; + unsigned long mask; + /* + * Transparent hugepages are handled by generic code. We can skip them + * here. + */ + ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); - if (! in_hugepage_area(mm->context, address)) + /* Verify it is a huge page else bail. */ + if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) return ERR_PTR(-EINVAL); - ptep = huge_pte_offset(mm, address); + mask = (1UL << shift) - 1; page = pte_page(*ptep); if (page) - page += (address % HPAGE_SIZE) / PAGE_SIZE; + page += (address & mask) / PAGE_SIZE; return page; } -int pmd_huge(pmd_t pmd) -{ - return 0; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) @@ -339,429 +706,384 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, return NULL; } -/* Because we have an exclusive hugepage region which lies within the - * normal user address space, we have to take special measures to make - * non-huge mmap()s evade the hugepage reserved regions. */ -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) +static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, + unsigned long sz) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long start_addr; + unsigned long __boundary = (addr + sz) & ~(sz-1); + return (__boundary - 1 < end - 1) ? 
__boundary : end; +} - if (len > TASK_SIZE) - return -ENOMEM; +int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, + unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + pte_t *ptep; + unsigned long sz = 1UL << hugepd_shift(*hugepd); + unsigned long next; - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (((TASK_SIZE - len) >= addr) - && (!vma || (addr+len) <= vma->vm_start) - && !is_hugepage_only_range(mm, addr,len)) - return addr; - } - if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; - } else { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; - } + ptep = hugepte_offset(hugepd, addr, pdshift); + do { + next = hugepte_addr_end(addr, end, sz); + if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr)) + return 0; + } while (ptep++, addr = next, addr != end); -full_search: - vma = find_vma(mm, addr); - while (TASK_SIZE - len >= addr) { - BUG_ON(vma && (addr >= vma->vm_end)); + return 1; +} - if (touches_hugepage_low_range(mm, addr, len)) { - addr = ALIGN(addr+1, 1<<SID_SHIFT); - vma = find_vma(mm, addr); - continue; - } - if (touches_hugepage_high_range(mm, addr, len)) { - addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); - vma = find_vma(mm, addr); - continue; - } - if (!vma || addr + len <= vma->vm_start) { - /* - * Remember the place where we stopped the search: - */ - mm->free_area_cache = addr + len; - return addr; - } - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - addr = vma->vm_end; - vma = vma->vm_next; - } +#ifdef CONFIG_PPC_MM_SLICES +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct hstate *hstate = hstate_file(file); + int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); - /* Make sure we didn't miss any holes */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; + return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1); } +#endif -/* - * This mmap-allocator allocates new areas top-down from below the - * stack's low limit (the base): - * - * Because we have an exclusive hugepage region which lies within the - * normal user address space, we have to take special measures to make - * non-huge mmap()s evade the hugepage reserved regions. 
- */ -unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) -{ - struct vm_area_struct *vma, *prev_vma; - struct mm_struct *mm = current->mm; - unsigned long base = mm->mmap_base, addr = addr0; - unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; - - /* requested length too big for entire address space */ - if (len > TASK_SIZE) - return -ENOMEM; - - /* dont allow allocations above current base */ - if (mm->free_area_cache > base) - mm->free_area_cache = base; - - /* requesting a specific address */ - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start) - && !is_hugepage_only_range(mm, addr,len)) - return addr; - } +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ +#ifdef CONFIG_PPC_MM_SLICES + unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); - if (len <= largest_hole) { - largest_hole = 0; - mm->free_area_cache = base; - } -try_again: - /* make sure it can fit in the remaining address space */ - if (mm->free_area_cache < len) - goto fail; + return 1UL << mmu_psize_to_shift(psize); +#else + if (!is_vm_hugetlb_page(vma)) + return PAGE_SIZE; - /* either no address requested or cant fit in requested address hole */ - addr = (mm->free_area_cache - len) & PAGE_MASK; - do { -hugepage_recheck: - if (touches_hugepage_low_range(mm, addr, len)) { - addr = (addr & ((~0) << SID_SHIFT)) - len; - goto hugepage_recheck; - } else if (touches_hugepage_high_range(mm, addr, len)) { - addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len; - goto hugepage_recheck; - } + return huge_page_size(hstate_vma(vma)); +#endif +} - /* - * Lookup failure means no vma is above this address, - * i.e. return with success: - */ - if (!(vma = find_vma_prev(mm, addr, &prev_vma))) - return addr; +static inline bool is_power_of_4(unsigned long x) +{ + if (is_power_of_2(x)) + return (__ilog2(x) % 2) ? false : true; + return false; +} - /* - * new region fits between prev_vma->vm_end and - * vma->vm_start, use it: - */ - if (addr+len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) { - /* remember the address as a hint for next time */ - mm->cached_hole_size = largest_hole; - return (mm->free_area_cache = addr); - } else { - /* pull free_area_cache down to the first hole */ - if (mm->free_area_cache == vma->vm_end) { - mm->free_area_cache = vma->vm_start; - mm->cached_hole_size = largest_hole; - } - } +static int __init add_huge_page_size(unsigned long long size) +{ + int shift = __ffs(size); + int mmu_psize; - /* remember the largest hole we saw so far */ - if (addr + largest_hole < vma->vm_start) - largest_hole = vma->vm_start - addr; + /* Check that it is a page size supported by the hardware and + * that it fits within pagetable and slice limits. 
*/ +#ifdef CONFIG_PPC_FSL_BOOK3E + if ((size < PAGE_SIZE) || !is_power_of_4(size)) + return -EINVAL; +#else + if (!is_power_of_2(size) + || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) + return -EINVAL; +#endif - /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; - } while (len <= vma->vm_start); + if ((mmu_psize = shift_to_mmu_psize(shift)) < 0) + return -EINVAL; -fail: - /* - * if hint left us with no space for the requested - * mapping then try again: - */ - if (first_time) { - mm->free_area_cache = base; - largest_hole = 0; - first_time = 0; - goto try_again; - } - /* - * A failed mmap() very likely causes application failure, - * so fall back to the bottom-up function here. This scenario - * can happen with large stack limits and large mmap() - * allocations. - */ - mm->free_area_cache = TASK_UNMAPPED_BASE; - mm->cached_hole_size = ~0UL; - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); - /* - * Restore the topdown base: +#ifdef CONFIG_SPU_FS_64K_LS + /* Disable support for 64K huge pages when 64K SPU local store + * support is enabled as the current implementation conflicts. */ - mm->free_area_cache = base; - mm->cached_hole_size = ~0UL; + if (shift == PAGE_SHIFT_64K) + return -EINVAL; +#endif /* CONFIG_SPU_FS_64K_LS */ + + BUG_ON(mmu_psize_defs[mmu_psize].shift != shift); - return addr; + /* Return if huge page size has already been setup */ + if (size_to_hstate(size)) + return 0; + + hugetlb_add_hstate(shift - PAGE_SHIFT); + + return 0; } -static unsigned long htlb_get_low_area(unsigned long len, u16 segmask) +static int __init hugepage_setup_sz(char *str) { - unsigned long addr = 0; - struct vm_area_struct *vma; + unsigned long long size; - vma = find_vma(current->mm, addr); - while (addr + len <= 0x100000000UL) { - BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ + size = memparse(str, &str); - if (! __within_hugepage_low_range(addr, len, segmask)) { - addr = ALIGN(addr+1, 1<<SID_SHIFT); - vma = find_vma(current->mm, addr); - continue; - } + if (add_huge_page_size(size) != 0) + printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); - if (!vma || (addr + len) <= vma->vm_start) - return addr; - addr = ALIGN(vma->vm_end, HPAGE_SIZE); - /* Depending on segmask this might not be a confirmed - * hugepage region, so the ALIGN could have skipped - * some VMAs */ - vma = find_vma(current->mm, addr); - } - - return -ENOMEM; + return 1; } +__setup("hugepagesz=", hugepage_setup_sz); -static unsigned long htlb_get_high_area(unsigned long len, u16 areamask) +#ifdef CONFIG_PPC_FSL_BOOK3E +struct kmem_cache *hugepte_cache; +static int __init hugetlbpage_init(void) { - unsigned long addr = 0x100000000UL; - struct vm_area_struct *vma; + int psize; - vma = find_vma(current->mm, addr); - while (addr + len <= TASK_SIZE_USER64) { - BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + unsigned shift; - if (! __within_hugepage_high_range(addr, len, areamask)) { - addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT); - vma = find_vma(current->mm, addr); + if (!mmu_psize_defs[psize].shift) continue; - } - if (!vma || (addr + len) <= vma->vm_start) - return addr; - addr = ALIGN(vma->vm_end, HPAGE_SIZE); - /* Depending on segmask this might not be a confirmed - * hugepage region, so the ALIGN could have skipped - * some VMAs */ - vma = find_vma(current->mm, addr); + shift = mmu_psize_to_shift(psize); + + /* Don't treat normal page sizes as huge... 
*/ + if (shift != PAGE_SHIFT) + if (add_huge_page_size(1ULL << shift) < 0) + continue; } - return -ENOMEM; -} + /* + * Create a kmem cache for hugeptes. The bottom bits in the pte have + * size information encoded in them, so align them to allow this + */ + hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t), + HUGEPD_SHIFT_MASK + 1, 0, NULL); + if (hugepte_cache == NULL) + panic("%s: Unable to create kmem cache for hugeptes\n", + __func__); + + /* Default hpage size = 4M */ + if (mmu_psize_defs[MMU_PAGE_4M].shift) + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; + else + panic("%s: Unable to set default huge page size\n", __func__); -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) + + return 0; +} +#else +static int __init hugetlbpage_init(void) { - int lastshift; - u16 areamask, curareas; + int psize; - if (HPAGE_SHIFT == 0) - return -EINVAL; - if (len & ~HPAGE_MASK) - return -EINVAL; + if (!mmu_has_feature(MMU_FTR_16M_PAGE)) + return -ENODEV; - if (!cpu_has_feature(CPU_FTR_16M_PAGE)) - return -EINVAL; + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + unsigned shift; + unsigned pdshift; - if (test_thread_flag(TIF_32BIT)) { - curareas = current->mm->context.low_htlb_areas; - - /* First see if we can do the mapping in the existing - * low areas */ - addr = htlb_get_low_area(len, curareas); - if (addr != -ENOMEM) - return addr; - - lastshift = 0; - for (areamask = LOW_ESID_MASK(0x100000000UL-len, len); - ! lastshift; areamask >>=1) { - if (areamask & 1) - lastshift = 1; - - addr = htlb_get_low_area(len, curareas | areamask); - if ((addr != -ENOMEM) - && open_low_hpage_areas(current->mm, areamask) == 0) - return addr; - } - } else { - curareas = current->mm->context.high_htlb_areas; - - /* First see if we can do the mapping in the existing - * high areas */ - addr = htlb_get_high_area(len, curareas); - if (addr != -ENOMEM) - return addr; - - lastshift = 0; - for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len); - ! lastshift; areamask >>=1) { - if (areamask & 1) - lastshift = 1; - - addr = htlb_get_high_area(len, curareas | areamask); - if ((addr != -ENOMEM) - && open_high_hpage_areas(current->mm, areamask) == 0) - return addr; + if (!mmu_psize_defs[psize].shift) + continue; + + shift = mmu_psize_to_shift(psize); + + if (add_huge_page_size(1ULL << shift) < 0) + continue; + + if (shift < PMD_SHIFT) + pdshift = PMD_SHIFT; + else if (shift < PUD_SHIFT) + pdshift = PUD_SHIFT; + else + pdshift = PGDIR_SHIFT; + /* + * if we have pdshift and shift value same, we don't + * use pgt cache for hugepd. + */ + if (pdshift != shift) { + pgtable_cache_add(pdshift - shift, NULL); + if (!PGT_CACHE(pdshift - shift)) + panic("hugetlbpage_init(): could not create " + "pgtable cache for %d bit pagesize\n", shift); } } - printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open" - " enough areas\n"); - return -ENOMEM; + + /* Set default large page size. 
Currently, we pick 16M or 1M + * depending on what is available + */ + if (mmu_psize_defs[MMU_PAGE_16M].shift) + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift; + else if (mmu_psize_defs[MMU_PAGE_1M].shift) + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; + + return 0; } +#endif +module_init(hugetlbpage_init); -int hash_huge_page(struct mm_struct *mm, unsigned long access, - unsigned long ea, unsigned long vsid, int local) +void flush_dcache_icache_hugepage(struct page *page) { - pte_t *ptep; - unsigned long old_pte, new_pte; - unsigned long va, rflags, pa; - long slot; - int err = 1; + int i; + void *start; + + BUG_ON(!PageCompound(page)); - ptep = huge_pte_offset(mm, ea); + for (i = 0; i < (1UL << compound_order(page)); i++) { + if (!PageHighMem(page)) { + __flush_dcache_icache(page_address(page+i)); + } else { + start = kmap_atomic(page+i); + __flush_dcache_icache(start); + kunmap_atomic(start); + } + } +} - /* Search the Linux page table for a match with va */ - va = (vsid << 28) | (ea & 0x0fffffff); +#endif /* CONFIG_HUGETLB_PAGE */ +/* + * We have 4 cases for pgds and pmds: + * (1) invalid (all zeroes) + * (2) pointer to next table, as normal; bottom 6 bits == 0 + * (3) leaf pte for huge page, bottom two bits != 00 + * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table + * + * So long as we atomically load page table pointers we are safe against teardown, + * we can follow the address down to the the page and take a ref on it. + */ + +pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) +{ + pgd_t pgd, *pgdp; + pud_t pud, *pudp; + pmd_t pmd, *pmdp; + pte_t *ret_pte; + hugepd_t *hpdp = NULL; + unsigned pdshift = PGDIR_SHIFT; + + if (shift) + *shift = 0; + + pgdp = pgdir + pgd_index(ea); + pgd = ACCESS_ONCE(*pgdp); /* - * If no pte found or not present, send the problem up to - * do_page_fault + * Always operate on the local stack value. This make sure the + * value don't get updated by a parallel THP split/collapse, + * page fault or a page unmap. The return pte_t * is still not + * stable. So should be checked there for above conditions. */ - if (unlikely(!ptep || pte_none(*ptep))) + if (pgd_none(pgd)) + return NULL; + else if (pgd_huge(pgd)) { + ret_pte = (pte_t *) pgdp; goto out; + } else if (is_hugepd(&pgd)) + hpdp = (hugepd_t *)&pgd; + else { + /* + * Even if we end up with an unmap, the pgtable will not + * be freed, because we do an rcu free and here we are + * irq disabled + */ + pdshift = PUD_SHIFT; + pudp = pud_offset(&pgd, ea); + pud = ACCESS_ONCE(*pudp); + + if (pud_none(pud)) + return NULL; + else if (pud_huge(pud)) { + ret_pte = (pte_t *) pudp; + goto out; + } else if (is_hugepd(&pud)) + hpdp = (hugepd_t *)&pud; + else { + pdshift = PMD_SHIFT; + pmdp = pmd_offset(&pud, ea); + pmd = ACCESS_ONCE(*pmdp); + /* + * A hugepage collapse is captured by pmd_none, because + * it mark the pmd none and do a hpte invalidate. 
+ * + * A hugepage split is captured by pmd_trans_splitting + * because we mark the pmd trans splitting and do a + * hpte invalidate + * + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) + return NULL; + + if (pmd_huge(pmd) || pmd_large(pmd)) { + ret_pte = (pte_t *) pmdp; + goto out; + } else if (is_hugepd(&pmd)) + hpdp = (hugepd_t *)&pmd; + else + return pte_offset_kernel(&pmd, ea); + } + } + if (!hpdp) + return NULL; + + ret_pte = hugepte_offset(hpdp, ea, pdshift); + pdshift = hugepd_shift(*hpdp); +out: + if (shift) + *shift = pdshift; + return ret_pte; +} +EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte); - /* - * Check the user's access rights to the page. If access should be - * prevented then send the problem up to do_page_fault. - */ - if (unlikely(access & ~pte_val(*ptep))) - goto out; +int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask; + unsigned long pte_end; + struct page *head, *page, *tail; + pte_t pte; + int refs; + + pte_end = (addr + sz) & ~(sz-1); + if (pte_end < end) + end = pte_end; + + pte = ACCESS_ONCE(*ptep); + mask = _PAGE_PRESENT | _PAGE_USER; + if (write) + mask |= _PAGE_RW; + + if ((pte_val(pte) & mask) != mask) + return 0; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* - * At this point, we have a pte (old_pte) which can be used to build - * or update an HPTE. There are 2 cases: - * - * 1. There is a valid (present) pte with no associated HPTE (this is - * the most common case) - * 2. There is a valid (present) pte with an associated HPTE. The - * current values of the pp bits in the HPTE prevent access - * because we are doing software DIRTY bit management and the - * page is currently not DIRTY. + * check for splitting here */ + if (pmd_trans_splitting(pte_pmd(pte))) + return 0; +#endif + + /* hugepages are never "special" */ + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + refs = 0; + head = pte_page(pte); + page = head + ((addr & (sz-1)) >> PAGE_SHIFT); + tail = page; do { - old_pte = pte_val(*ptep); - if (old_pte & _PAGE_BUSY) - goto out; - new_pte = old_pte | _PAGE_BUSY | - _PAGE_ACCESSED | _PAGE_HASHPTE; - } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, - old_pte, new_pte)); - - rflags = 0x2 | (!(new_pte & _PAGE_RW)); - /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ - rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); - - /* Check if pte already has an hpte (case 2) */ - if (unlikely(old_pte & _PAGE_HASHPTE)) { - /* There MIGHT be an HPTE for this pte */ - unsigned long hash, slot; - - hash = hpt_hash(va, HPAGE_SHIFT); - if (old_pte & _PAGE_F_SECOND) - hash = ~hash; - slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; - slot += (old_pte & _PAGE_F_GIX) >> 12; - - if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1) - old_pte &= ~_PAGE_HPTEFLAGS; + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; } - if (likely(!(old_pte & _PAGE_HASHPTE))) { - unsigned long hash = hpt_hash(va, HPAGE_SHIFT); - unsigned long hpte_group; - - pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; - -repeat: - hpte_group = ((hash & htab_hash_mask) * - HPTES_PER_GROUP) & ~0x7UL; - - /* clear HPTE slot informations in new PTE */ - new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; - - /* Add in WIMG bits */ - /* XXX We should store these in the pte */ - /* --BenH: I think they are ... 
*/ - rflags |= _PAGE_COHERENT; - - /* Insert into the hash table, primary slot */ - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, - mmu_huge_psize); - - /* Primary is full, try the secondary */ - if (unlikely(slot == -1)) { - new_pte |= _PAGE_F_SECOND; - hpte_group = ((~hash & htab_hash_mask) * - HPTES_PER_GROUP) & ~0x7UL; - slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, - HPTE_V_SECONDARY, - mmu_huge_psize); - if (slot == -1) { - if (mftb() & 0x1) - hpte_group = ((hash & htab_hash_mask) * - HPTES_PER_GROUP)&~0x7UL; - - ppc_md.hpte_remove(hpte_group); - goto repeat; - } - } - - if (unlikely(slot == -2)) - panic("hash_huge_page: pte_insert failed\n"); - - new_pte |= (slot << 12) & _PAGE_F_GIX; + if (unlikely(pte_val(pte) != pte_val(*ptep))) { + /* Could be optimized better */ + *nr -= refs; + while (refs--) + put_page(head); + return 0; } /* - * No need to use ldarx/stdcx here because all who - * might be updating the pte will hold the - * page_table_lock + * Any tail page need their mapcount reference taken before we + * return. */ - *ptep = __pte(new_pte & ~_PAGE_BUSY); - - err = 0; + while (refs--) { + if (PageTail(tail)) + get_huge_page_tail(tail); + tail++; + } - out: - return err; + return 1; } |
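The block comment added above find_linux_pte_or_hugepte() lists four possible encodings for a pgd/pud/pmd entry (invalid, normal table pointer, leaf huge-page PTE, hugepd pointer), distinguished purely by the low bits of the value. As an illustrative aside, and not part of the patch, the minimal userspace sketch below uses made-up names and plain uint64_t values to show that same low-bit classification; the kernel itself spreads the checks across pgd_huge(), is_hugepd() and the pXd_none() helpers rather than using a single function like this.

#include <stdint.h>
#include <stdio.h>

enum pd_kind { PD_NONE, PD_TABLE, PD_HUGE_LEAF, PD_HUGEPD };

/* Classify an entry by its low bits, following the four cases in the
 * comment: all zeroes, normal table pointer (bottom 6 bits == 0), leaf
 * huge-page PTE (bottom 2 bits != 00), or hugepd pointer (bottom 2 bits
 * == 00 with some of bits 2-5 set to encode the table size). */
static enum pd_kind classify_pd(uint64_t val)
{
	if (val == 0)
		return PD_NONE;		/* case 1: invalid */
	if (val & 0x3)
		return PD_HUGE_LEAF;	/* case 3: leaf PTE for a huge page */
	if ((val & 0x3f) == 0)
		return PD_TABLE;	/* case 2: pointer to the next table */
	return PD_HUGEPD;		/* case 4: hugepd pointer */
}

int main(void)
{
	const char *names[] = { "none", "table", "huge leaf", "hugepd" };
	uint64_t samples[] = { 0x0, 0xc0de000, 0xc0de003, 0xc0de018 };
	for (unsigned i = 0; i < 4; i++)
		printf("%#llx -> %s\n",
		       (unsigned long long)samples[i],
		       names[classify_pd(samples[i])]);
	return 0;
}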
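Similarly, add_huge_page_size() keeps only the log2 of the requested size (the "shift") and rejects sizes the MMU cannot use: the FSL Book3E path demands a power of four (is_power_of_4()), while the other path requires a power of two within the slice limits plus a matching entry in mmu_psize_defs. A small standalone check of that arithmetic, assuming __builtin_ctzll() as a stand-in for the kernel's __ffs(), is sketched below; it is not taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pow2(uint64_t x) { return x && !(x & (x - 1)); }

/* A power of four is a power of two whose log2 is even,
 * mirroring is_power_of_4() in the patch. */
static bool pow4(uint64_t x) { return pow2(x) && (__builtin_ctzll(x) % 2 == 0); }

int main(void)
{
	/* 4M, 16M, 32M and 16G: 32M passes the power-of-two test
	 * but fails the power-of-four test used on FSL Book3E. */
	uint64_t sizes[] = { 1ULL << 22, 1ULL << 24, 1ULL << 25, 1ULL << 34 };
	for (unsigned i = 0; i < 4; i++)
		printf("%llu: shift=%d pow2=%d pow4=%d\n",
		       (unsigned long long)sizes[i],
		       __builtin_ctzll(sizes[i]),
		       pow2(sizes[i]), pow4(sizes[i]));
	return 0;
}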