Diffstat (limited to 'arch/sh/mm/mmap.c')
-rw-r--r--	arch/sh/mm/mmap.c	124
1 file changed, 81 insertions, 43 deletions
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 931f4d003fa..6777177807c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008 Paul Mundt
+ * Copyright (C) 2008 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -14,30 +14,36 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
-#ifdef CONFIG_MMU
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	return base + off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
@@ -61,47 +67,79 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_colour_align;
+	struct vm_unmapped_area_info info;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+			return -EINVAL;
+		return addr;
 	}
 
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
 
-		addr = vma->vm_end;
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
 		if (do_colour_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
 	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
 }
 #endif /* CONFIG_MMU */
 
@@ -109,7 +147,7 @@ full_search:
  * You really shouldn't be using read() or write() on /dev/mem.  This
  * might go away in the future.
  */
-int valid_phys_addr_range(unsigned long addr, size_t count)
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
 	if (addr < __MEMORY_START)
 		return 0;
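A note on the colour-alignment arithmetic in this diff: on aliasing SH caches, two virtual mappings of the same page must agree in the address bits covered by shm_align_mask. COLOUR_ALIGN() therefore rounds the search address up to an alias boundary and adds the colour implied by the file offset, and the MAP_FIXED path now rejects a shared mapping only when (addr - (pgoff << PAGE_SHIFT)) has colour bits set, i.e. when the requested address and the file offset disagree on colour. The userspace sketch below illustrates that arithmetic only; PAGE_SHIFT, SHM_ALIGN_MASK and the sample values are illustrative assumptions (a 16KiB alias boundary with 4KiB pages), not taken from the patch or from real hardware.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's page size and shm_align_mask;
 * a 16KiB alias boundary with 4KiB pages is assumed here for the demo. */
#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define SHM_ALIGN_MASK ((1UL << 14) - 1)	/* 16KiB - 1 */

/* Mirrors the patch's COLOUR_ALIGN() helper: round addr up to an alias
 * boundary, then add the cache colour implied by the file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return base + off;
}

/* Mirrors the fixed-mapping check: a MAP_FIXED|MAP_SHARED request is
 * acceptable only when addr and pgoff carry the same colour, i.e. when
 * (addr - (pgoff << PAGE_SHIFT)) has no bits inside the mask. */
static int fixed_addr_ok(unsigned long addr, unsigned long pgoff)
{
	return ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0;
}

int main(void)
{
	unsigned long hint = 0x20001000UL;	/* arbitrary unaligned hint */
	unsigned long pgoff = 3;		/* file offset of 3 pages */
	unsigned long addr = colour_align(hint, pgoff);

	printf("hint %#lx -> colour-aligned %#lx\n", hint, addr);
	printf("fixed mapping at aligned addr ok? %d\n",
	       fixed_addr_ok(addr, pgoff));
	/* Shifting by one page breaks the colour match. */
	printf("fixed mapping at addr + PAGE_SIZE ok? %d\n",
	       fixed_addr_ok(addr + PAGE_SIZE, pgoff));
	return 0;
}

This also shows why the old test, addr & shm_align_mask, was wrong in both directions: it rejected valid requests whose address and file offset shared the same non-zero colour, and it accepted a zero-colour address even when the file offset demanded a non-zero colour. Subtracting pgoff << PAGE_SHIFT before masking handles both cases.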
