Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Makefile             |  12
-rw-r--r--  arch/arm/mm/cache-fa.S           |  32
-rw-r--r--  arch/arm/mm/cache-v3.S           |  43
-rw-r--r--  arch/arm/mm/cache-v4.S           |  43
-rw-r--r--  arch/arm/mm/cache-v4wb.S         |  32
-rw-r--r--  arch/arm/mm/cache-v4wt.S         |  40
-rw-r--r--  arch/arm/mm/cache-v6.S           |  34
-rw-r--r--  arch/arm/mm/cache-v7.S           |  34
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       |  11
-rw-r--r--  arch/arm/mm/copypage-feroceon.c  |   3
-rw-r--r--  arch/arm/mm/copypage-v3.c        |   2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      |   2
-rw-r--r--  arch/arm/mm/copypage-v4wb.c      |   3
-rw-r--r--  arch/arm/mm/copypage-v4wt.c      |   2
-rw-r--r--  arch/arm/mm/copypage-v6.c        |   4
-rw-r--r--  arch/arm/mm/copypage-xsc3.c      |   3
-rw-r--r--  arch/arm/mm/copypage-xscale.c    |   2
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 159
-rw-r--r--  arch/arm/mm/fault-armv.c         |  85
-rw-r--r--  arch/arm/mm/flush.c              |  51
-rw-r--r--  arch/arm/mm/init.c               |   2
-rw-r--r--  arch/arm/mm/proc-arm1020.S       |  32
-rw-r--r--  arch/arm/mm/proc-arm1020e.S      |  32
-rw-r--r--  arch/arm/mm/proc-arm1022.S       |  32
-rw-r--r--  arch/arm/mm/proc-arm1026.S       |  32
-rw-r--r--  arch/arm/mm/proc-arm920.S        |  32
-rw-r--r--  arch/arm/mm/proc-arm922.S        |  32
-rw-r--r--  arch/arm/mm/proc-arm925.S        |  32
-rw-r--r--  arch/arm/mm/proc-arm926.S        |  32
-rw-r--r--  arch/arm/mm/proc-arm940.S        |  32
-rw-r--r--  arch/arm/mm/proc-arm946.S        |  32
-rw-r--r--  arch/arm/mm/proc-feroceon.S      |  54
-rw-r--r--  arch/arm/mm/proc-mohawk.S        |  32
-rw-r--r--  arch/arm/mm/proc-xsc3.S          |  39
-rw-r--r--  arch/arm/mm/proc-xscale.S        |  49
-rw-r--r--  arch/arm/mm/tlb-v7.S             |   1
36 files changed, 818 insertions(+), 276 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 827e238e5d4..e8d34a80851 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -27,6 +27,9 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
+AFLAGS_abort-ev6.o :=-Wa,-march=armv6k
+AFLAGS_abort-ev7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
@@ -39,6 +42,9 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
+AFLAGS_cache-v6.o :=-Wa,-march=armv6
+AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
@@ -58,6 +64,9 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
+AFLAGS_tlb-v6.o :=-Wa,-march=armv6
+AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
@@ -84,6 +93,9 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
+AFLAGS_proc-v6.o :=-Wa,-march=armv6
+AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index a89444a3c01..7148e53e607 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
@@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(fa_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq fa_dma_clean_range
+ bcs fa_dma_inv_range
+ b fa_dma_flush_range
+ENDPROC(fa_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(fa_dma_unmap_area)
+ mov pc, lr
+ENDPROC(fa_dma_unmap_area)
+
__INITDATA
.type fa_cache_fns, #object
@@ -215,7 +239,7 @@ ENTRY(fa_cache_fns)
.long fa_coherent_kern_range
.long fa_coherent_user_range
.long fa_flush_kern_dcache_area
- .long fa_dma_inv_range
- .long fa_dma_clean_range
+ .long fa_dma_map_area
+ .long fa_dma_unmap_area
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
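
Note on the dispatch above: fa_dma_map_area relies on the ordering of enum dma_data_direction (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2). After cmp r2, #DMA_TO_DEVICE, beq selects the clean-only path, bcs (unsigned higher, since equal was already taken) selects invalidate-only for DMA_FROM_DEVICE, and the fall-through branch flushes for DMA_BIDIRECTIONAL; fa_dma_unmap_area is a no-op because all maintenance happens at map time on this core. A minimal C sketch of the same selection, using hypothetical C signatures for the assembly range routines:

#include <linux/dma-mapping.h>	/* enum dma_data_direction, size_t */

/* hypothetical C prototypes for the assembly routines above */
void fa_dma_clean_range(const void *start, const void *end);
void fa_dma_inv_range(const void *start, const void *end);
void fa_dma_flush_range(const void *start, const void *end);

static void fa_dma_map_area_sketch(const void *start, size_t size,
				   enum dma_data_direction dir)
{
	const void *end = (const char *)start + size;	/* add r1, r1, r0 */

	if (dir == DMA_TO_DEVICE)			/* beq */
		fa_dma_clean_range(start, end);		/* writeback only */
	else if (dir > DMA_TO_DEVICE)			/* bcs: DMA_FROM_DEVICE */
		fa_dma_inv_range(start, end);		/* invalidate only */
	else						/* DMA_BIDIRECTIONAL */
		fa_dma_flush_range(start, end);		/* clean + invalidate */
}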
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2a482731ea3..c2ff3c599fe 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_inv_range)
- /* FALLTHROUGH */
-
-/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -108,18 +94,29 @@ ENTRY(v3_dma_inv_range)
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+ teq r2, #DMA_TO_DEVICE
+ bne v3_dma_flush_range
/* FALLTHROUGH */
/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
*/
-ENTRY(v3_dma_clean_range)
+ENTRY(v3_dma_map_area)
mov pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
__INITDATA
@@ -131,7 +128,7 @@ ENTRY(v3_cache_fns)
.long v3_coherent_kern_range
.long v3_coherent_user_range
.long v3_flush_kern_dcache_area
- .long v3_dma_inv_range
- .long v3_dma_clean_range
+ .long v3_dma_map_area
+ .long v3_dma_unmap_area
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5c7da3e372e..4810f7e3e81 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4_dma_inv_range)
- /* FALLTHROUGH */
-
-/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -120,18 +106,29 @@ ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
+ mov pc, lr
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4_dma_unmap_area)
+ teq r2, #DMA_TO_DEVICE
+ bne v4_dma_flush_range
/* FALLTHROUGH */
/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
*/
-ENTRY(v4_dma_clean_range)
+ENTRY(v4_dma_map_area)
mov pc, lr
+ENDPROC(v4_dma_unmap_area)
+ENDPROC(v4_dma_map_area)
__INITDATA
@@ -143,7 +140,7 @@ ENTRY(v4_cache_fns)
.long v4_coherent_kern_range
.long v4_coherent_user_range
.long v4_flush_kern_dcache_area
- .long v4_dma_inv_range
- .long v4_dma_clean_range
+ .long v4_dma_map_area
+ .long v4_dma_unmap_area
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 3dbedf1ec0e..df8368afa10 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range)
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq v4wb_dma_clean_range
+ bcs v4wb_dma_inv_range
+ b v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+ mov pc, lr
+ENDPROC(v4wb_dma_unmap_area)
+
__INITDATA
.type v4wb_cache_fns, #object
@@ -226,7 +250,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
.long v4wb_flush_kern_dcache_area
- .long v4wb_dma_inv_range
- .long v4wb_dma_clean_range
+ .long v4wb_dma_map_area
+ .long v4wb_dma_unmap_area
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index b3b7410270b..45c70312f43 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
@@ -172,6 +161,29 @@ ENTRY(v4wt_dma_clean_range)
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wt_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v4wt_dma_inv_range
+ /* FALLTHROUGH */
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wt_dma_map_area)
+ mov pc, lr
+ENDPROC(v4wt_dma_unmap_area)
+ENDPROC(v4wt_dma_map_area)
+
__INITDATA
.type v4wt_cache_fns, #object
@@ -182,7 +194,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
.long v4wt_flush_kern_dcache_area
- .long v4wt_dma_inv_range
- .long v4wt_dma_clean_range
+ .long v4wt_dma_map_area
+ .long v4wt_dma_unmap_area
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
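
v4wt takes the opposite split from the writeback cores: with a writethrough data cache there is nothing to clean before the device reads memory, so v4wt_dma_map_area is a plain return and the work happens at unmap time, discarding any lines the device may have overwritten (the teq/bne pair above skips this for DMA_TO_DEVICE). The v3/v4 variants earlier follow the same shape, except that lacking per-line operations they flush the whole ID cache instead. A C sketch of the v4wt split, under the same hypothetical signatures as the earlier sketch:

#include <linux/dma-mapping.h>

void v4wt_dma_inv_range(const void *start, const void *end);	/* assembly above */

/* writethrough cache: memory is already up to date, nothing to do */
static void v4wt_dma_map_area_sketch(const void *start, size_t size,
				     enum dma_data_direction dir)
{
}

/* after the transfer: invalidate stale lines unless CPU -> device only */
static void v4wt_dma_unmap_area_sketch(const void *start, size_t size,
				       enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		v4wt_dma_inv_range(start, (const char *)start + size);
}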
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 4ba0a24ce6f..9d89c67a1cc 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -263,6 +263,32 @@ ENTRY(v6_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v6_dma_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_FROM_DEVICE
+ beq v6_dma_inv_range
+ b v6_dma_clean_range
+ENDPROC(v6_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v6_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v6_dma_inv_range
+ mov pc, lr
+ENDPROC(v6_dma_unmap_area)
+
__INITDATA
.type v6_cache_fns, #object
@@ -273,7 +299,7 @@ ENTRY(v6_cache_fns)
.long v6_coherent_kern_range
.long v6_coherent_user_range
.long v6_flush_kern_dcache_area
- .long v6_dma_inv_range
- .long v6_dma_clean_range
+ .long v6_dma_map_area
+ .long v6_dma_unmap_area
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 9073db849fb..bcd64f26587 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_inv_range)
+v7_dma_inv_range:
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_clean_range)
+v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -271,6 +271,32 @@ ENTRY(v7_dma_flush_range)
mov pc, lr
ENDPROC(v7_dma_flush_range)
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7_dma_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_FROM_DEVICE
+ beq v7_dma_inv_range
+ b v7_dma_clean_range
+ENDPROC(v7_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v7_dma_inv_range
+ mov pc, lr
+ENDPROC(v7_dma_unmap_area)
+
__INITDATA
.type v7_cache_fns, #object
@@ -281,7 +307,7 @@ ENTRY(v7_cache_fns)
.long v7_coherent_kern_range
.long v7_coherent_user_range
.long v7_flush_kern_dcache_area
- .long v7_dma_inv_range
- .long v7_dma_clean_range
+ .long v7_dma_map_area
+ .long v7_dma_unmap_area
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
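
v6 and v7 split the maintenance across both sides of the transfer. At map time the region is cleaned out of the D-cache so the device reads current data, or merely invalidated when the device is the only writer (DMA_FROM_DEVICE); at unmap time it is invalidated again for anything other than DMA_TO_DEVICE, which guards against lines speculatively refetched while the DMA was in flight. A C sketch of the v7 pair (v6 is structurally identical), again with hypothetical C signatures for the internal range routines:

#include <linux/dma-mapping.h>

void v7_dma_inv_range(const void *start, const void *end);	/* assembly above */
void v7_dma_clean_range(const void *start, const void *end);

static void v7_dma_map_area_sketch(const void *start, size_t size,
				   enum dma_data_direction dir)
{
	const void *end = (const char *)start + size;

	if (dir == DMA_FROM_DEVICE)			/* device writes: discard */
		v7_dma_inv_range(start, end);
	else						/* TO_DEVICE, BIDIRECTIONAL */
		v7_dma_clean_range(start, end);		/* push dirty lines to RAM */
}

static void v7_dma_unmap_area_sketch(const void *start, size_t size,
				     enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)			/* drop speculated lines */
		v7_dma_inv_range(start, (const char *)start + size);
}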
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 5d180cb0bd9..c3154928bcc 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -221,15 +221,14 @@ static int __init xsc3_l2_init(void)
if (!cpu_is_xsc3() || !xsc3_l2_present())
return 0;
- if (!(get_cr() & CR_L2)) {
+ if (get_cr() & CR_L2) {
pr_info("XScale3 L2 cache enabled.\n");
- adjust_cr(CR_L2, CR_L2);
xsc3_l2_inv_all();
- }
- outer_cache.inv_range = xsc3_l2_inv_range;
- outer_cache.clean_range = xsc3_l2_clean_range;
- outer_cache.flush_range = xsc3_l2_flush_range;
+ outer_cache.inv_range = xsc3_l2_inv_range;
+ outer_cache.clean_range = xsc3_l2_clean_range;
+ outer_cache.flush_range = xsc3_l2_flush_range;
+ }
return 0;
}
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 70997d5bee2..5eb4fd93893 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -68,12 +68,13 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
}
void feroceon_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
feroceon_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
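
This is the first of the copypage changes: each copy_user_highpage implementation gains a struct vm_area_struct *vma argument, and the VIVT-cache variants use it to flush_cache_page() the user mapping of the source page before copying through the kernel alias; without that, dirty lines indexed by the user virtual address would not be visible at the kernel virtual address. A consolidated sketch of the pattern (the function name is hypothetical; the calls mirror the hunks in this series):

#include <linux/highmem.h>
#include <linux/mm.h>

static void example_copy_user_highpage(struct page *to, struct page *from,
				       unsigned long vaddr,
				       struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	/* write back the user-space alias so the kernel alias reads
	 * current data (needed on VIVT caches) */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	copy_page(kto, kfrom);		/* stand-in for the per-CPU copy loop */

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}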
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index de9c06854ad..f72303e1d80 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
}
void v3_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7370a7142b0..598c51ad507 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
}
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 9ab09841422..7c2eb55cd4a 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -48,12 +48,13 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
}
void v4wb_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
v4wb_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 300efafd664..172e6a55458 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
}
void v4wt_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0fa1319273d..8bca4dea6df 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
@@ -81,7 +81,7 @@ static void discard_old_kernel_data(void *kto)
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index bc4525f5ab2..747ad4140fc 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -71,12 +71,13 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
}
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
xsc3_mc_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 76824d3e966..9920c0ae209 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
}
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 26325cb5d36..64daef2173b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,78 +404,44 @@ EXPORT_SYMBOL(dma_free_coherent);
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-void dma_cache_maint(const void *start, size_t size, int direction)
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
{
- void (*inner_op)(const void *, const void *);
- void (*outer_op)(unsigned long, unsigned long);
-
- BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = dmac_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = dmac_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = dmac_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
- }
+ unsigned long paddr;
+
+ BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+ dmac_map_area(kaddr, size, dir);
- inner_op(start, start + size);
- outer_op(__pa(start), __pa(start) + size);
+ paddr = __pa(kaddr);
+ if (dir == DMA_FROM_DEVICE) {
+ outer_inv_range(paddr, paddr + size);
+ } else {
+ outer_clean_range(paddr, paddr + size);
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
- size_t size, int direction)
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
{
- void *vaddr;
- unsigned long paddr;
- void (*inner_op)(const void *, const void *);
- void (*outer_op)(unsigned long, unsigned long);
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = dmac_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = dmac_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = dmac_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
- }
+ BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
- if (!PageHighMem(page)) {
- vaddr = page_address(page) + offset;
- inner_op(vaddr, vaddr + size);
- } else {
- vaddr = kmap_high_get(page);
- if (vaddr) {
- vaddr += offset;
- inner_op(vaddr, vaddr + size);
- kunmap_high(page);
- }
+ /* FIXME: non-speculating: not required */
+ /* don't bother invalidating if DMA to device */
+ if (dir != DMA_TO_DEVICE) {
+ unsigned long paddr = __pa(kaddr);
+ outer_inv_range(paddr, paddr + size);
}
- paddr = page_to_phys(page) + offset;
- outer_op(paddr, paddr + size);
+ dmac_unmap_area(kaddr, size, dir);
}
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
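
The two exported helpers above replace dma_cache_maint(): ___dma_single_cpu_to_dev performs the inner maintenance first (dmac_map_area) and then the matching outer-cache operation, while ___dma_single_dev_to_cpu reverses the order, invalidating the outer cache before dmac_unmap_area and skipping the invalidate entirely for DMA_TO_DEVICE. A hedged usage sketch; dma_roundtrip_sketch is a hypothetical wrapper, not a kernel API:

#include <linux/dma-mapping.h>

static void dma_roundtrip_sketch(void *buf, size_t size)
{
	/* CPU -> device: dmac_map_area(), then outer clean
	 * (or outer invalidate for DMA_FROM_DEVICE) */
	___dma_single_cpu_to_dev(buf, size, DMA_BIDIRECTIONAL);

	/* ... device performs the transfer ... */

	/* device -> CPU: outer invalidate first (skipped for
	 * DMA_TO_DEVICE), then dmac_unmap_area() */
	___dma_single_dev_to_cpu(buf, size, DMA_BIDIRECTIONAL);
}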
-void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int dir)
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
{
/*
* A single sg entry may refer to multiple physically contiguous
@@ -486,20 +452,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t left = size;
do {
size_t len = left;
- if (PageHighMem(page) && len + offset > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset / PAGE_SIZE;
- offset %=