Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c  53
1 file changed, 40 insertions, 13 deletions
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca..ff379ac115d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -26,8 +25,9 @@
#include "mm.h"
-static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
+#if __LINUX_ARM_ARCH__ < 6
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
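
The fix this comment refers to is applied in do_adjust_pte(), whose tail is the only part visible in the next hunk: the memory-type bits of an aliased, shared PTE are replaced with shared_pte_mask. A minimal sketch of that rewrite, assuming the pteval_t/L_PTE_MT_MASK definitions from the ARM pgtable headers; the helper name below is hypothetical, the real code open-codes this inside do_adjust_pte():

static inline pteval_t pte_make_shared_uncacheable(pteval_t val)
{
        /* Drop whatever memory type the PTE had ... */
        val &= ~L_PTE_MT_MASK;
        /* ... and mark it uncacheable but still write-bufferable,
         * the default value of shared_pte_mask above. */
        val |= shared_pte_mask;
        return val;
}
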
@@ -65,11 +65,36 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
+#if USE_SPLIT_PTE_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTE_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
spinlock_t *ptl;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
@@ -78,7 +103,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
if (pgd_none_or_clear_bad(pgd))
return 0;
- pmd = pmd_offset(pgd, address);
+ pud = pud_offset(pgd, address);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 0;
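
The new pud step costs nothing on classic two-level ARM page tables: when the pud is folded away, the generic nopud definitions reduce it to a cast and two constant checks. Simplified from include/asm-generic/pgtable-nopud.h, shown only to illustrate why the extra level is safe to add unconditionally:

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        /* With a folded pud, the "pud" is just the pgd entry itself. */
        return (pud_t *)pgd;
}
#define pud_none(pud)   0       /* never empty ...                          */
#define pud_bad(pud)    0       /* ... and never bad, so                    */
                                /* pud_none_or_clear_bad() is always false. */
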
@@ -88,13 +117,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
* open-code the spin-locking.
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
- pte = pte_offset_map_nested(pmd, address);
- spin_lock(ptl);
+ pte = pte_offset_map(pmd, address);
+ do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
- spin_unlock(ptl);
- pte_unmap_nested(pte);
+ do_pte_unlock(ptl);
+ pte_unmap(pte);
return ret;
}
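
Since the function is spread across the three hunks above, it may help to see adjust_pte() as it reads with the patch applied. This is a reconstruction from the hunks plus their surrounding context (the comment above the locking is abbreviated), not an excerpt from the tree:

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        /* Map the PTE and open-code the spin-locking: do_pte_lock()
         * only takes the lock when split PTE locks are in use. */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map(pmd, address);
        do_pte_lock(ptl);

        ret = do_adjust_pte(vma, address, pfn, pte);

        do_pte_unlock(ptl);
        pte_unmap(pte);

        return ret;
}
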
@@ -105,7 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
- struct prio_tree_iter iter;
unsigned long offset;
pgoff_t pgoff;
int aliases = 0;
@@ -118,7 +146,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
* cache coherency.
*/
flush_dcache_mmap_lock(mapping);
- vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
/*
* If this VMA is not in our MM, we can ignore it.
* Note that we intentionally mask out the VMA
@@ -141,7 +169,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
- * 1. If PG_dcache_dirty is set for the page, we need to ensure
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
* that any cache entries for the kernel's virtual memory
* range are written back to the page.
* 2. If we have multiple shared mappings of the same space in
@@ -168,10 +196,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
return;
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
-#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
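
The hunk above also inverts the dcache bookkeeping bit: the old code flushed when PG_dcache_dirty had been set, and cleared it; the new code flushes unless PG_dcache_clean is already set, and sets it, so a freshly mapped page defaults to being flushed; the #ifndef CONFIG_SMP guard around the lazy flush is dropped in the same hunk. A tiny userspace analogue of the two idioms, with hypothetical single-threaded stand-ins for the kernel's test_and_set_bit()/test_and_clear_bit():

#include <stdbool.h>
#include <stdio.h>

static bool test_and_set(unsigned long *flags, int bit)
{
        bool old = *flags & (1UL << bit);
        *flags |= 1UL << bit;
        return old;
}

static bool test_and_clear(unsigned long *flags, int bit)
{
        bool old = *flags & (1UL << bit);
        *flags &= ~(1UL << bit);
        return old;
}

int main(void)
{
        unsigned long flags = 0;        /* a brand-new page: no flag bits set */

        /* Old scheme, PG_dcache_dirty: a new page is not flushed,
         * because nothing has set the dirty bit yet. */
        printf("dirty scheme flushes new page: %d\n", test_and_clear(&flags, 0));

        flags = 0;

        /* New scheme, PG_dcache_clean: a new page is flushed,
         * because the clean bit is only set after the flush. */
        printf("clean scheme flushes new page: %d\n", !test_and_set(&flags, 0));

        return 0;
}
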
@@ -179,6 +205,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
__flush_icache_all();
}
}
+#endif /* __LINUX_ARM_ARCH__ < 6 */
/*
* Check whether the write buffer has physical address aliasing