Diffstat (limited to 'arch/arm/mm/cache-v4wb.S')
-rw-r--r--  arch/arm/mm/cache-v4wb.S  73
1 file changed, 51 insertions, 22 deletions
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2ebc1b3bf85..cd494532140 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -32,7 +32,7 @@
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
- * cache line maintainence instructions.
+ * cache line maintenance instructions.
*
* Size Clean (ticks) Dirty (ticks)
* 4096 21 20 21 53 55 54
@@ -51,6 +51,17 @@ flush_base:
.text
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
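
For reference, what the new entry point does, rendered as a C sketch (not part of the patch; the inline asm mirrors the single CP15 write above, which invalidates the whole instruction cache on these cores):

    /* Sketch only: the same c7, c5, 0 write as v4wb_flush_icache_all. */
    static inline void v4wb_flush_icache_all_sketch(void)
    {
            unsigned long zero = 0;

            asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
    }
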
@@ -114,15 +125,16 @@ ENTRY(v4wb_flush_user_cache_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+ add r1, r0, r1
/* fall through */
/*
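
The interface change in this hunk is easiest to see as C prototypes (a sketch; the entry-point names are real, the annotations are mine):

    /* Old hook: one page-aligned address, length fixed at PAGE_SZ. */
    void v4wb_flush_kern_dcache_page(void *page);

    /* New hook: any kernel address plus an explicit length, so callers can
     * flush sub-page or multi-page regions.  The assembly computes
     * end = addr + size ("add r1, r0, r1") and falls through to the shared
     * clean-and-invalidate loop. */
    void v4wb_flush_kern_dcache_area(void *addr, size_t size);
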
@@ -155,9 +167,9 @@ ENTRY(v4wb_coherent_user_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov ip, #0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
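
A side effect worth noting (my reading, not stated in the diff): the AAPCS returns values in r0, so switching the scratch register from ip to r0 also makes the routine return 0.  That matches the int-returning form of this hook, in which CPUs that can fault during user-range maintenance report -EFAULT, while v4wb, which cannot fault here, reports success:

    /* Assumed prototype for the C-visible hook. */
    int v4wb_coherent_user_range(unsigned long start, unsigned long end);
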
@@ -172,7 +184,7 @@ ENTRY(v4wb_coherent_user_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
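
The first three instructions above handle a misaligned start: a buffer edge that shares a cache line with unrelated data must be cleaned (written back) before invalidation, or that neighbouring dirty data would be lost.  A C sketch of the routine's shape, with hypothetical clean_dline()/inv_dline() helpers standing in for the CP15 operations and CACHE_DLINESIZE taken from this file's definition (the full routine also cleans a straddled line at the end of the range; only the start-edge handling is visible in this hunk):

    #define CACHE_DLINESIZE 32                      /* D-cache line size, per this file */

    static inline void clean_dline(unsigned long a) /* hypothetical: mcr ... c7, c10, 1 */
    {
            asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (a));
    }

    static inline void inv_dline(unsigned long a)   /* hypothetical: mcr ... c7, c6, 1 */
    {
            asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (a));
    }

    static void dma_inv_range_sketch(unsigned long start, unsigned long end)
    {
            int edge = start & (CACHE_DLINESIZE - 1);   /* tst r0, ... */

            start &= ~(CACHE_DLINESIZE - 1);            /* bic r0, ... */
            if (edge)
                    clean_dline(start);                 /* mcrne: save the straddled line */

            while (start < end) {                       /* invalidate loop that follows */
                    inv_dline(start);                   /* drop stale data before DMA read */
                    start += CACHE_DLINESIZE;
            }
    }
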
@@ -193,7 +205,7 @@ ENTRY(v4wb_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
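
The clean variant is the same loop without the invalidate, so no edge handling is needed: cleaning a line that also holds unrelated data is always safe.  A sketch, under the same assumptions as above:

    static void dma_clean_range_sketch(unsigned long start, unsigned long end)
    {
            for (start &= ~(CACHE_DLINESIZE - 1); start < end; start += CACHE_DLINESIZE)
                    clean_dline(start);                 /* mcr ... c7, c10, 1 */
    }
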
@@ -215,17 +227,34 @@ ENTRY(v4wb_dma_clean_range)
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq v4wb_dma_clean_range
+ bcs v4wb_dma_inv_range
+ b v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+ mov pc, lr
+ENDPROC(v4wb_dma_unmap_area)
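
In C terms, the new map hook selects a maintenance routine from the DMA direction; the cmp/beq/bcs pair leans on the ordering of the direction values.  A sketch, treating the assembly entry points as (start, end) functions and assuming DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1 and DMA_FROM_DEVICE = 2, as in the kernel's enum dma_data_direction:

    static void dma_map_area_sketch(unsigned long start, unsigned long size, int dir)
    {
            unsigned long end = start + size;           /* add r1, r1, r0 */

            if (dir == DMA_TO_DEVICE)                   /* beq */
                    v4wb_dma_clean_range(start, end);   /* CPU wrote: push data to RAM */
            else if (dir > DMA_TO_DEVICE)               /* bcs: DMA_FROM_DEVICE */
                    v4wb_dma_inv_range(start, end);     /* device writes: drop stale lines */
            else                                        /* DMA_BIDIRECTIONAL */
                    v4wb_dma_flush_range(start, end);   /* clean and invalidate */
    }

The unmap hook is a bare "mov pc, lr": all maintenance happens at map time, and these cores do not speculatively fetch into the D-cache, so there is nothing left to undo afterwards.
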
+
+ .globl v4wb_flush_kern_cache_louis
+ .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
+
__INITDATA
- .type v4wb_cache_fns, #object
-ENTRY(v4wb_cache_fns)
- .long v4wb_flush_kern_cache_all
- .long v4wb_flush_user_cache_all
- .long v4wb_flush_user_cache_range
- .long v4wb_coherent_kern_range
- .long v4wb_coherent_user_range
- .long v4wb_flush_kern_dcache_page
- .long v4wb_dma_inv_range
- .long v4wb_dma_clean_range
- .long v4wb_dma_flush_range
- .size v4wb_cache_fns, . - v4wb_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v4wb
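
define_cache_functions (from arch/arm/mm/proc-macros.S) emits every slot of struct cpu_cache_fns in declaration order, so replacing the hand-written .long table with the macro means a slot added to the struct can no longer be silently omitted from one CPU's table.  Roughly, paraphrased from <asm/cacheflush.h> (field types indicative, not authoritative):

    struct cpu_cache_fns {
            void (*flush_icache_all)(void);
            void (*flush_kern_all)(void);
            void (*flush_kern_louis)(void);     /* to the Level of Unification Inner Shareable */
            void (*flush_user_all)(void);
            void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
            void (*coherent_kern_range)(unsigned long, unsigned long);
            int  (*coherent_user_range)(unsigned long, unsigned long);
            void (*flush_kern_dcache_area)(void *, size_t);
            void (*dma_map_area)(const void *, size_t, int);
            void (*dma_unmap_area)(const void *, size_t, int);
            void (*dma_flush_range)(const void *, const void *);
    };

The louis slot is aliased to flush_kern_cache_all above because this single-level cache has nothing between the point of unification and the whole cache.
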