Diffstat (limited to 'arch/microblaze/kernel/cpu/cache.c')
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c  229
1 file changed, 113 insertions(+), 116 deletions(-)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index f04d8a86dea..a6e44410672 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -17,93 +17,84 @@
static inline void __enable_icache_msr(void)
{
- __asm__ __volatile__ (" msrset r0, %0; \
- nop; " \
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
: : "i" (MSR_ICE) : "memory");
}
static inline void __disable_icache_msr(void)
{
- __asm__ __volatile__ (" msrclr r0, %0; \
- nop; " \
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop;" \
: : "i" (MSR_ICE) : "memory");
}
static inline void __enable_dcache_msr(void)
{
- __asm__ __volatile__ (" msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory");
}
static inline void __disable_dcache_msr(void)
{
- __asm__ __volatile__ (" msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory");
}
static inline void __enable_icache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __disable_icache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __enable_dcache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
}
static inline void __disable_dcache_nomsr(void)
{
- __asm__ __volatile__ (" mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory", "r12");
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
}
-/* Helper macro for computing the limits of cache range loops */
+/* Helper macro for computing the limits of cache range loops
+ *
+ * The end address can be unaligned, which is fine for the C implementation.
+ * The ASM implementation aligns it in the ASM macros.
+ */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
int align = ~(cache_line_length - 1); \
end = min(start + cache_size, end); \
start &= align; \
- end = ((end & align) + cache_line_length); \
-} while (0);
+} while (0)
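For illustration, here is a minimal standalone C sketch of what the reworked CACHE_LOOP_LIMITS now computes, with a hypothetical 32-byte cache line and 8 KB cache (the names and values are only for the example; note the end address is left unaligned, as the comment above says):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long start = 0x1005, end = 0x1055;	/* hypothetical range */
	unsigned long cache_line_length = 32, cache_size = 8192;
	unsigned long align = ~(cache_line_length - 1);

	end = min(start + cache_size, end);	/* clamp range to one cache size */
	start &= align;				/* align start down to a line */

	/* prints start=0x1000 end=0x1055 - end stays unaligned */
	printf("start=%#lx end=%#lx\n", start, end);
	return 0;
}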
/*
* Helper macro to loop over the specified cache_size/line_length and
@@ -111,58 +102,56 @@ do { \
*/
#define CACHE_ALL_LOOP(cache_size, line_length, op) \
do { \
- unsigned int len = cache_size; \
+ unsigned int len = cache_size - line_length; \
int step = -line_length; \
- BUG_ON(step >= 0); \
+ WARN_ON(step >= 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, r0; \
- bgtid %0, 1b; \
- addk %0, %0, %1; \
- " : : "r" (len), "r" (step) \
+ __asm__ __volatile__ (" 1: " #op " %0, r0;" \
+ "bgtid %0, 1b;" \
+ "addk %0, %0, %1;" \
+ : : "r" (len), "r" (step) \
: "memory"); \
-} while (0);
-
+} while (0)
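As a rough C equivalent of what the CACHE_ALL_LOOP asm does (a sketch only; op stands in for the wdc/wic instruction that the real macro emits via #op), it touches one address per cache line from the top offset down to zero:

/* Sketch: one op per cache line, from (cache_size - line_length) down to 0. */
void cache_all_loop_c(unsigned int cache_size, unsigned int line_length,
		      void (*op)(unsigned long addr))
{
	int len;

	for (len = cache_size - line_length; len >= 0; len -= line_length)
		op(len);
}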
-#define CACHE_ALL_LOOP2(cache_size, line_length, op) \
-do { \
- unsigned int len = cache_size; \
- int step = -line_length; \
- BUG_ON(step >= 0); \
- \
- __asm__ __volatile__ (" 1: " #op " r0, %0; \
- bgtid %0, 1b; \
- addk %0, %0, %1; \
- " : : "r" (len), "r" (step) \
- : "memory"); \
-} while (0);
-
-/* for wdc.flush/clear */
+/* Used for wdc.flush/clear, which can use rB as the offset; that is not
+ * possible for plain wdc or wic.
+ *
+ * The start address is cache-line aligned.
+ * The end address may be unaligned. If it is aligned, subtract one
+ * cacheline length so the next cacheline is not flushed/invalidated.
+ * If it is not aligned, align it down because the whole line is
+ * flushed/invalidated anyway.
+ */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
do { \
int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int align = ~(line_length - 1); \
+ int count; \
+ end = ((end & align) == end) ? end - line_length : end & align; \
+ count = end - start; \
+ WARN_ON(count < 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, %1; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
+ __asm__ __volatile__ (" 1: " #op " %0, %1;" \
+ "bgtid %1, 1b;" \
+ "addk %1, %1, %2;" \
+ : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
-} while (0);
+} while (0)
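The end-address rounding described in the comment above can be illustrated with a small standalone C example (hypothetical 32-byte lines; the assert values are chosen only to demonstrate both cases):

#include <assert.h>

/* If end is already line-aligned, step back one line (the loop is inclusive
 * of end); otherwise align it down, since the whole line is flushed anyway. */
static unsigned long round_end(unsigned long end, unsigned long line_length)
{
	unsigned long align = ~(line_length - 1);

	return ((end & align) == end) ? end - line_length : end & align;
}

int main(void)
{
	assert(round_end(0x1040, 32) == 0x1020);	/* aligned: back one line */
	assert(round_end(0x1055, 32) == 0x1040);	/* unaligned: align down */
	return 0;
}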
/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int volatile temp; \
- BUG_ON(end - start <= 0); \
+ int volatile temp = 0; \
+ int align = ~(line_length - 1); \
+ end = ((end & align) == end) ? end - line_length : end & align; \
+ WARN_ON(end - start < 0); \
\
- __asm__ __volatile__ (" 1: " #op " %1, r0; \
- cmpu %0, %1, %2; \
- bgtid %0, 1b; \
- addk %1, %1, %3; \
- " : : "r" (temp), "r" (start), "r" (end),\
+ __asm__ __volatile__ (" 1: " #op " %1, r0;" \
+ "cmpu %0, %1, %2;" \
+ "bgtid %0, 1b;" \
+ "addk %1, %1, %3;" \
+ : : "r" (temp), "r" (start), "r" (end), \
"r" (line_length) : "memory"); \
-} while (0);
+} while (0)
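After that rounding, CACHE_RANGE_LOOP_1 behaves roughly like the following C loop (sketch only; op again stands in for wic/wdc, and start is assumed line-aligned as produced by CACHE_LOOP_LIMITS):

/* One op per cache line from start to the rounded end, inclusive,
 * matching the cmpu/bgtid loop above. */
void cache_range_loop_1_c(unsigned long start, unsigned long end,
			  unsigned long line_length,
			  void (*op)(unsigned long addr))
{
	unsigned long addr;

	for (addr = start; addr <= end; addr += line_length)
		op(addr);
}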
#define ASM_LOOP
@@ -351,7 +340,7 @@ static void __invalidate_dcache_all_noirq_wt(void)
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
@@ -360,8 +349,13 @@ static void __invalidate_dcache_all_noirq_wt(void)
#endif
}
-/* FIXME this is weird - should be only wdc but not work
- * MS: I am getting bus errors and other weird things */
+/*
+ * FIXME This blindly invalidates, as expected, but it can't be called
+ * on noMMU in microblaze_cache_init below.
+ *
+ * MS: the noMMU kernel won't boot if a simple wdc is used.
+ * The likely reason is that data the kernel still needs gets discarded.
+ */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
@@ -369,12 +363,12 @@ static void __invalidate_dcache_all_wb(void)
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
- wdc.clear)
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ __asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
}
@@ -393,7 +387,7 @@ static void __invalidate_dcache_range_wb(unsigned long start,
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
@@ -413,7 +407,7 @@ static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -437,7 +431,7 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -465,7 +459,7 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -504,7 +498,7 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
@@ -514,7 +508,7 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
struct scache *mbc;
/* new wb cache model */
-const struct scache wb_msr = {
+static const struct scache wb_msr = {
.ie = __enable_icache_msr,
.id = __disable_icache_msr,
.ifl = __flush_icache_all_noirq,
@@ -530,7 +524,7 @@ const struct scache wb_msr = {
};
/* There is only difference in ie, id, de, dd functions */
-const struct scache wb_nomsr = {
+static const struct scache wb_nomsr = {
.ie = __enable_icache_nomsr,
.id = __disable_icache_nomsr,
.ifl = __flush_icache_all_noirq,
@@ -546,7 +540,7 @@ const struct scache wb_nomsr = {
};
/* Old wt cache model with disabling irq and turn off cache */
-const struct scache wt_msr = {
+static const struct scache wt_msr = {
.ie = __enable_icache_msr,
.id = __disable_icache_msr,
.ifl = __flush_icache_all_msr_irq,
@@ -561,7 +555,7 @@ const struct scache wt_msr = {
.dinr = __invalidate_dcache_range_msr_irq_wt,
};
-const struct scache wt_nomsr = {
+static const struct scache wt_nomsr = {
.ie = __enable_icache_nomsr,
.id = __disable_icache_nomsr,
.ifl = __flush_icache_all_nomsr_irq,
@@ -577,7 +571,7 @@ const struct scache wt_nomsr = {
};
/* New wt cache model for newer Microblaze versions */
-const struct scache wt_msr_noirq = {
+static const struct scache wt_msr_noirq = {
.ie = __enable_icache_msr,
.id = __disable_icache_msr,
.ifl = __flush_icache_all_noirq,
@@ -592,7 +586,7 @@ const struct scache wt_msr_noirq = {
.dinr = __invalidate_dcache_range_nomsr_wt,
};
-const struct scache wt_nomsr_noirq = {
+static const struct scache wt_nomsr_noirq = {
.ie = __enable_icache_nomsr,
.id = __disable_icache_nomsr,
.ifl = __flush_icache_all_noirq,
@@ -611,46 +605,49 @@ const struct scache wt_nomsr_noirq = {
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
-#define INFO(s) printk(KERN_INFO "cache: " s "\n");
-
void microblaze_cache_init(void)
{
if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
if (cpuinfo.dcache_wb) {
- INFO("wb_msr");
+ pr_info("wb_msr\n");
mbc = (struct scache *)&wb_msr;
- if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ if (cpuinfo.ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
- INFO("WB won't work properly");
+ pr_info("WB won't work properly\n");
}
} else {
if (cpuinfo.ver_code >= CPUVER_7_20_A) {
- INFO("wt_msr_noirq");
+ pr_info("wt_msr_noirq\n");
mbc = (struct scache *)&wt_msr_noirq;
} else {
- INFO("wt_msr");
+ pr_info("wt_msr\n");
mbc = (struct scache *)&wt_msr;
}
}
} else {
if (cpuinfo.dcache_wb) {
- INFO("wb_nomsr");
+ pr_info("wb_nomsr\n");
mbc = (struct scache *)&wb_nomsr;
- if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ if (cpuinfo.ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
- INFO("WB won't work properly");
+ pr_info("WB won't work properly\n");
}
} else {
if (cpuinfo.ver_code >= CPUVER_7_20_A) {
- INFO("wt_nomsr_noirq");
+ pr_info("wt_nomsr_noirq\n");
mbc = (struct scache *)&wt_nomsr_noirq;
} else {
- INFO("wt_nomsr");
+ pr_info("wt_nomsr\n");
mbc = (struct scache *)&wt_nomsr;
}
}
}
- invalidate_dcache();
+	/*
+	 * FIXME Invalidation is already done in U-Boot.
+	 * WT cache: data is already written to main memory.
+	 * WB cache: discarding data on noMMU prevented the kernel from booting.
+	 */
+ /* invalidate_dcache(); */
enable_dcache();
invalidate_icache();