aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2006-09-27 14:05:52 +0900
committerPaul Mundt <lethal@linux-sh.org>2006-09-27 14:05:52 +0900
commitfdfc74f9fcebdda14609159d5010b758a9409acf (patch)
tree191532cb703383768cc198a41503e412578921bb
parent36efc35447154317f9ffc5163a1793b5f7ff3de1 (diff)
sh: Support for SH-4A memory barriers.
SH-4A supports 'synco' as a barrier; sprinkle it around the cache ops as necessary. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--arch/sh/mm/cache-sh4.c5
-rw-r--r--include/asm-sh/system.h7
2 files changed, 12 insertions, 0 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 94c05d09c3f..846b63d6f5e 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -184,6 +184,7 @@ void flush_cache_sigtramp(unsigned long addr)
i++, index += cpu_data->icache.way_incr)
ctrl_outl(0, index); /* Clear out Valid-bit */
back_to_P1();
+ wmb();
local_irq_restore(flags);
}
@@ -223,6 +224,8 @@ void flush_dcache_page(struct page *page)
flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
}
+
+ wmb();
}
static inline void flush_icache_all(void)
@@ -247,6 +250,7 @@ void flush_dcache_all(void)
__flush_dcache_all();
else
__flush_dcache_all_ex();
+ wmb();
}
void flush_cache_all(void)
@@ -377,5 +381,6 @@ void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len)
{
flush_cache_page(vma, addr, page_to_pfn(page));
+ mb();
}
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index e89728d405d..eb4902ed920 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -84,10 +84,17 @@ static __inline__ unsigned long tas(volatile int *m)
extern void __xchg_called_with_bad_pointer(void);
+#ifdef CONFIG_CPU_SH4A
+#define mb() __asm__ __volatile__ ("synco": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("synco": : :"memory")
+#define read_barrier_depends() do { } while(0)
+#else
#define mb() __asm__ __volatile__ ("": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends() do { } while(0)
+#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()