author    Catalin Marinas <catalin.marinas@arm.com>   2010-09-13 15:57:36 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-09-19 12:17:43 +0100
commit    c01778001a4f5ad9c62d882776235f3f31922fdd (patch)
tree      a425979b236dd5c7757e9a1f0c66d3819ad99021 /arch/arm/mm/copypage-xscale.c
parent    0fc73099dd25df2c5181b7bad57d1faa5cd12d3c (diff)
ARM: 6379/1: Assume new page cache pages have dirty D-cache
There are places in Linux where writes to newly allocated page cache pages happen without a subsequent call to flush_dcache_page() (several PIO drivers, including USB HCD). This patch changes the meaning of PG_arch_1 to be PG_dcache_clean and always flushes the D-cache for a newly mapped page in update_mmu_cache().

The patch also sets the PG_arch_1 bit in the DMA cache maintenance function to avoid additional cache flushing in update_mmu_cache().

Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
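The flag's polarity is inverted by this change: the old PG_dcache_dirty bit meant "D-cache needs flushing", while the new PG_dcache_clean bit means "D-cache already flushed", so a freshly allocated page (with neither bit set) is now assumed to need a flush. The following stand-alone C model only illustrates that inversion; the PG_DCACHE_* constants and the *_needs_flush() helpers are made-up stand-ins, not kernel identifiers.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative flag bits; not the kernel's PG_arch_1 encoding. */
#define PG_DCACHE_DIRTY  (1UL << 0)  /* old meaning: set => needs flushing  */
#define PG_DCACHE_CLEAN  (1UL << 1)  /* new meaning: set => already flushed */

/* Old policy: flush only if something explicitly marked the page dirty. */
static bool old_needs_flush(unsigned long *flags)
{
	bool flush = *flags & PG_DCACHE_DIRTY;
	*flags &= ~PG_DCACHE_DIRTY;          /* models test_and_clear_bit() */
	return flush;
}

/* New policy: flush unless the page has already been marked clean. */
static bool new_needs_flush(unsigned long *flags)
{
	bool was_clean = *flags & PG_DCACHE_CLEAN;
	*flags |= PG_DCACHE_CLEAN;           /* models !test_and_set_bit() */
	return !was_clean;
}

int main(void)
{
	unsigned long old_page = 0, new_page = 0;  /* fresh pages: no arch bit set */

	/* Old policy skips the flush for a page nobody marked dirty (the PIO case)... */
	printf("old policy flushes fresh page: %s\n", old_needs_flush(&old_page) ? "yes" : "no");

	/* ...new policy flushes it once, then remembers it is clean. */
	printf("new policy flushes fresh page: %s\n", new_needs_flush(&new_page) ? "yes" : "no");
	printf("new policy flushes it again:   %s\n", new_needs_flush(&new_page) ? "yes" : "no");
	return 0;
}

The same inversion is what the hunk below applies to the xscale copy_user_highpage() path: test_and_clear_bit(PG_dcache_dirty, ...) becomes !test_and_set_bit(PG_dcache_clean, ...), so the flush also covers pages whose flag was never touched, and is skipped once a page has been marked clean.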
Diffstat (limited to 'arch/arm/mm/copypage-xscale.c')
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae209..649bbcd325b 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
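For context, the update_mmu_cache() change described in the commit message follows the same pattern as this hunk. The sketch below is a rough simplification, not the actual fault-armv.c code from this series; zero-page handling, SMP and cache-aliasing details are omitted.

/* Simplified sketch: flush the D-cache the first time a page is mapped,
 * then mark it PG_dcache_clean so later faults can skip the flush. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(page_mapping(page), page);
}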