Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--	arch/arm/mm/fault-armv.c	41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c493d7244d3..ff379ac115d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -8,7 +8,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -26,7 +25,7 @@
 
 #include "mm.h"
 
-static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 #if __LINUX_ARM_ARCH__ < 6
 /*
@@ -66,11 +65,36 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+#if USE_SPLIT_PTE_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+	/*
+	 * Use nested version here to indicate that we are already
+	 * holding one similar spinlock.
+	 */
+	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+	spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTE_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int ret;
@@ -79,7 +103,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	if (pgd_none_or_clear_bad(pgd))
 		return 0;
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 0;
+
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
@@ -90,11 +118,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
 	pte = pte_offset_map(pmd, address);
-	spin_lock(ptl);
+	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
-	spin_unlock(ptl);
+	do_pte_unlock(ptl);
 	pte_unmap(pte);
 
 	return ret;
@@ -106,7 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
 	unsigned long offset;
 	pgoff_t pgoff;
 	int aliases = 0;
@@ -119,7 +146,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
 	 * cache coherency.
 	 */
 	flush_dcache_mmap_lock(mapping);
-	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		/*
 		 * If this VMA is not in our MM, we can ignore it.
 		 * Note that we intentionally mask out the VMA
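
The adjust_pte() hunk above converts a three-level page-table walk (pgd -> pmd -> pte) into the generic four-level form by adding the pud step. As a minimal sketch of that walk pattern, not taken from the patch itself (follow_pte_example() is a hypothetical name; the pud_offset()/pte_offset_map() signatures are the pre-p4d ones this kernel era uses):

#include <linux/mm.h>

/* Hypothetical helper illustrating the pgd -> pud -> pmd -> pte walk. */
static pte_t *follow_pte_example(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	/* On 2-level ARM the pud is folded into the pgd, so this is free. */
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	/* Caller must pte_unmap() the returned pointer. */
	return pte_offset_map(pmd, addr);
}

Because ARM's classic MMU has only two levels (three with LPAE), pud_offset() here compiles down to returning its pgd argument, so the extra step costs nothing at runtime while keeping the walk correct against the generic page-table API.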

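The do_pte_lock() wrapper added above exists because, with split PTE locks, the fault path calling into make_coherent() already holds the PTE lock of the faulting page table, and the lock for the aliasing mapping's page table belongs to the same lockdep class. Below is a hedged sketch of that annotation pattern with hypothetical names; in the real code both pointers come from pte_lockptr(), and the locks share a class because they are all initialized from the same spin_lock_init() call site:

#include <linux/spinlock.h>

static spinlock_t example_ptl[2];	/* stand-ins for two per-page PTE locks */

static void example_init(void)
{
	int i;

	/* One spin_lock_init() call site: both locks share a lockdep class. */
	for (i = 0; i < 2; i++)
		spin_lock_init(&example_ptl[i]);
}

static void example_adjust(void)
{
	spin_lock(&example_ptl[0]);	/* lock already held by the fault path */

	/*
	 * Same class as example_ptl[0]; without the nesting annotation
	 * lockdep would report a (false) recursive-locking deadlock.
	 */
	spin_lock_nested(&example_ptl[1], SINGLE_DEPTH_NESTING);

	/* ... fix up the aliasing PTE here ... */

	spin_unlock(&example_ptl[1]);
	spin_unlock(&example_ptl[0]);
}

Without SINGLE_DEPTH_NESTING, lockdep treats taking a second lock of the same class as a potential self-deadlock and warns, even though the two locks protect different page tables. In the !USE_SPLIT_PTE_PTLOCKS case no annotation is needed, since the single mm->page_table_lock is already held and the wrappers compile to nothing.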