-rw-r--r--	arch/arm/mm/flush.c		| 26 ++++++++++++++++++++++++++
-rw-r--r--	include/asm-arm/cacheflush.h	| 18 +++++++++++++++---
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b103e56806b..d438ce41cdd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (cache_is_vipt_aliasing())
 		flush_pfn_alias(pfn, user_addr);
 }
+
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cache_is_vivt()) {
+		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+			unsigned long addr = (unsigned long)kaddr;
+			__cpuc_coherent_kern_range(addr, addr + len);
+		}
+		return;
+	}
+
+	if (cache_is_vipt_aliasing()) {
+		flush_pfn_alias(page_to_pfn(page), uaddr);
+		return;
+	}
+
+	/* VIPT non-aliasing cache */
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	    vma->vm_flags | VM_EXEC) {
+		unsigned long addr = (unsigned long)kaddr;
+		/* only flushing the kernel mapping on non-aliasing VIPT */
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index fe0c744e026..e4a2569c636 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -247,14 +247,12 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
-		flush_dcache_page(page);			\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
 	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
 
@@ -285,10 +283,24 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
 }
+
+static inline void
+flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+		    unsigned long uaddr, void *kaddr,
+		    unsigned long len, int write)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		unsigned long addr = (unsigned long)kaddr;
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
 #endif
 
 /*
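
For context, copy_to_user_page() is the arch hook used when the kernel writes into another process's page through a kernel mapping (the ptrace poke path via access_process_vm()); this patch replaces the flush_cache_page()/flush_dcache_page() pair around the memcpy with a single flush_ptrace_access() that dispatches on the cache model (VIVT, aliasing VIPT, non-aliasing VIPT). The following is a minimal sketch of that caller side, not the actual kernel/ptrace.c source: the helper name and the simplified error handling are illustrative, while kmap()/kunmap(), set_page_dirty_lock() and the macro itself are the standard interfaces involved.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

/*
 * Simplified illustration of how a ptrace-style write reaches
 * copy_to_user_page(): map the target page into the kernel, copy through
 * that kernel alias, and let the arch hook make the user-visible
 * instruction/data caches coherent again.
 */
static int ptrace_write_page_sketch(struct vm_area_struct *vma,
				    struct page *page, unsigned long uaddr,
				    const void *buf, int len)
{
	void *kaddr = kmap(page);
	void *dst = kaddr + (uaddr & ~PAGE_MASK);	/* offset within the page */

	/* memcpy() plus the cache maintenance this patch adds */
	copy_to_user_page(vma, page, uaddr, dst, buf, len);
	set_page_dirty_lock(page);
	kunmap(page);

	return len;
}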