author    Arnd Bergmann <arnd@arndb.de>    2012-01-09 16:06:31 +0000
committer Arnd Bergmann <arnd@arndb.de>    2012-01-09 16:06:31 +0000
commit    b48741cce3be32a48af9a2b272f3f13a077375cf (patch)
tree      e4bc91713e02fa6d8f08b07de53ea8f905593dfa /arch/arm/include/asm/edac.h
parent    54b6c82ac9a124804816f244f587c0f40d25ad01 (diff)
parent    a07613a54d700a974f3a4a657da78ef5d097315d (diff)
Merge branch 'samsung/cleanup' into next/cleanup2
Diffstat (limited to 'arch/arm/include/asm/edac.h')
-rw-r--r--  arch/arm/include/asm/edac.h  48
1 file changed, 48 insertions, 0 deletions
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644
index 00000000000..0df7a2c1fc3
--- /dev/null
+++ b/arch/arm/include/asm/edac.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int *virt_addr = va;
+ unsigned int temp, temp2;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %1, %0, [%2]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ : "=&r"(temp), "=&r"(temp2)
+ : "r"(virt_addr)
+ : "cc");
+ }
+#endif
+}
+
+#endif
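
The ldrex/strex pair in the loop above is the standard ARMv6+ exclusive-access
retry pattern: ldrex loads a word and marks its address in the exclusive
monitor, strex writes the value back and sets %1 to zero only if nothing else
touched the location in between, and teq/bne retries on failure. Rewriting
each word with its own value forces the memory controller to recompute the
ECC, correcting any single-bit error it finds on the way. As a rough
illustration of the same idea outside the kernel, here is a minimal sketch in
portable C11 atomics; scrub_region() is a hypothetical name invented for this
example, not a kernel or EDAC API, and a real scrubber would additionally
have to account for cacheability and write-back behavior.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical C11 analogue of atomic_scrub(): atomically read each
 * 32-bit word and store the same value back, so the hardware re-encodes
 * the ECC for that word on the write.
 */
static void scrub_region(void *va, size_t size)
{
	_Atomic uint32_t *word = va;
	size_t i, n = size / sizeof(*word);

	for (i = 0; i < n; i++) {
		uint32_t v = atomic_load(&word[i]);

		/*
		 * Compare-and-swap the value with itself; on failure, v is
		 * refreshed with the current contents and we retry, which
		 * mirrors the ldrex/strex loop in the patch.
		 */
		while (!atomic_compare_exchange_weak(&word[i], &v, v))
			;
	}
}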