Diffstat (limited to 'arch/ia64/mm/hugetlbpage.c'):

 -rw-r--r--  arch/ia64/mm/hugetlbpage.c | 72
 1 file changed, 45 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 2d13889d0a9..76069c18ee4 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -8,24 +8,24 @@
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
-#include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/log2.h>
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
+unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
+EXPORT_SYMBOL(hpage_shift);
 
 pte_t *
-huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
+huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
@@ -38,7 +38,7 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_map(mm, NULL, pmd, taddr);
 	}
 	return pte;
 }
@@ -65,12 +65,19 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
- * This function checks for proper alignment of input addr and len parameters.
+ * Don't actually need to do any preparation, but need to make sure
+ * the address is in the right region.
  */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
 {
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
@@ -101,19 +108,24 @@ int pmd_huge(pmd_t pmd)
 {
 	return 0;
 }
+
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {
 	return NULL;
 }
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
 	/*
-	 * This is called only when is_hugepage_only_range(addr,),
-	 * and it follows that is_hugepage_only_range(end,) also.
+	 * This is called to free hugetlb page tables.
 	 *
 	 * The offset of these addresses from the base of the hugetlb
 	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
@@ -125,9 +137,9 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	addr = htlbpage_to_page(addr);
 	end = htlbpage_to_page(end);
 
-	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+	if (REGION_NUMBER(floor) == RGN_HPAGE)
 		floor = htlbpage_to_page(floor);
-	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
 		ceiling = htlbpage_to_page(ceiling);
 
 	free_pgd_range(tlb, addr, end, floor, ceiling);
@@ -136,25 +148,31 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct *vmm;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
+
+	/* Handle MAP_FIXED */
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+		return addr;
+	}
+
 	/* This code assumes that RGN_HPAGE != 0. */
 	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
 		addr = HPAGE_REGION_BASE;
-	else
-		addr = ALIGN(addr, HPAGE_SIZE);
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point: (!vmm || addr < vmm->vm_end). */
-		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
-			return -ENOMEM;
-		if (!vmm || (addr + len) <= vmm->vm_start)
-			return addr;
-		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
-	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static int __init hugetlb_setup_sz(char *str)
@@ -169,7 +187,7 @@ static int __init hugetlb_setup_sz(char *str)
 		tr_pages = 0x15557000UL;
 
 	size = memparse(str, &str);
-	if (*str || (size & (size-1)) || !(tr_pages & size) ||
+	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
 		size <= PAGE_SIZE ||
 		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
 		printk(KERN_WARNING "Invalid huge page size specified\n");
@@ -182,6 +200,6 @@ static int __init hugetlb_setup_sz(char *str)
 	 * override here with new page shift.
 	 */
 	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
-	return 1;
+	return 0;
 }
-__setup("hugepagesz=", hugetlb_setup_sz);
+early_param("hugepagesz", hugetlb_setup_sz);
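A note on the hugetlb_setup_sz() hunk: the open-coded power-of-two test (size & (size-1)) and its replacement !is_power_of_2(size) (from the newly included <linux/log2.h>) are not quite the same expression. For size == 0 the bitmask form evaluates to 0, so zero slipped through that subexpression and was only rejected by the later !(tr_pages & size) clause; is_power_of_2(0) is false, so the new form rejects zero on its own. A minimal userspace sketch of that edge case, with is_power_of_2() re-implemented the way include/linux/log2.h defines it:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Userspace copy of the kernel's is_power_of_2() from include/linux/log2.h. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long sizes[] = { 0, 1UL << 28, (1UL << 28) + 1 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];
		/* Old test: nonzero for a non-power-of-two, but since
		 * 0 & (0 - 1) == 0, size == 0 passes this subexpression. */
		int old_reject = (size & (size - 1)) != 0;
		/* New test: rejects size == 0 explicitly as well. */
		int new_reject = !is_power_of_2(size);
		printf("size=%#lx old_reject=%d new_reject=%d\n",
		       size, old_reject, new_reject);
	}
	return 0;
}

The __setup() to early_param() switch in the same function also explains the return 1 to return 0 change: a __setup() handler returns 1 to mark the option as consumed, while an early_param() handler returns 0 on success.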

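The hugetlb_get_unmapped_area() conversion replaces the open-coded find_vma() walk with vm_unmapped_area(), carrying the old loop's HPAGE_SIZE alignment over as info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1). A standalone sketch of that mask arithmetic, using assumed example values (PAGE_SHIFT = 14 for a 16 KB base page and HPAGE_SHIFT = 28 for a 256 MB huge page; on ia64 both are configuration dependent, and the huge page size can be changed with the hugepagesz= boot parameter):

#include <stdio.h>

/* Assumed example values; the real shifts depend on the kernel config. */
#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HPAGE_SHIFT	28
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)

int main(void)
{
	/* The mask handed to vm_unmapped_area(): the bits between PAGE_SHIFT
	 * and HPAGE_SHIFT, i.e. huge-page alignment at page granularity. */
	unsigned long align_mask = PAGE_MASK & (HPAGE_SIZE - 1);

	printf("align_mask = %#lx\n", align_mask);	/* 0xfffc000 here */

	/* An address is suitably aligned when (addr & align_mask) == 0:
	 * true for a HPAGE_SIZE multiple, false one base page later. */
	unsigned long addr = 3UL << HPAGE_SHIFT;
	printf("%#lx aligned: %d\n", addr, (addr & align_mask) == 0);
	printf("%#lx aligned: %d\n", addr + PAGE_SIZE,
	       ((addr + PAGE_SIZE) & align_mask) == 0);
	return 0;
}

With the gap search delegated to vm_unmapped_area(), the function only has to choose the window (low_limit is the validated addr or HPAGE_REGION_BASE, high_limit is HPAGE_REGION_BASE + RGN_MAP_LIMIT), and the manual RGN_MAP_LIMIT overflow check inside the old loop disappears.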