Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--    arch/arm/mm/copypage-v6.c    101
1 files changed, 51 insertions, 50 deletions
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3adb79257f4..b9bcc9d7917 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,12 +10,13 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 
 #include "mm.h"
 
@@ -23,69 +24,78 @@
 #error FIX ME
 #endif
 
-#define from_address	(0xffff8000)
-#define to_address	(0xffffc000)
-
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
  * Clear the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr);
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
 	 * Now copy the page using the same cache colour as the
 	 * pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
-
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	raw_spin_lock(&v6_lock);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to = to_address + (offset << PAGE_SHIFT);
+	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -93,44 +103,35 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long to = to_address + (offset << PAGE_SHIFT);
+	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
 	 * the pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
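
For context, cpu_user_fns is the per-CPU-type function table that the generic ARM page helpers dispatch through; v6_userpage_init() swaps in the aliasing variants once the cache type is known at boot. A minimal sketch of that dispatch, assuming the wrapper macros in arch/arm/include/asm/page.h look roughly like this (illustrative only, not the exact kernel source):

/*
 * Illustrative sketch: how clear_user_highpage()/copy_user_highpage()
 * reach the routines installed in the table above.  Field names follow
 * this file; the real definitions live in arch/arm/include/asm/page.h
 * and may differ in detail.
 */
struct page;
struct vm_area_struct;

struct cpu_user_fns {
        void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
        void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma);
};

extern struct cpu_user_fns cpu_user;

/* Generic mm code calls these; the indirection picks the variant chosen
 * at boot: nonaliasing by default, aliasing if the D-cache is VIPT-aliasing. */
#define clear_user_highpage(page, vaddr) \
        cpu_user.cpu_clear_user_highpage(page, vaddr)
#define copy_user_highpage(to, from, vaddr, vma) \
        cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma)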
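The aliasing path hinges on CACHE_COLOUR(): on a VIPT-aliasing D-cache the way size exceeds PAGE_SIZE, so the same physical page can land in different cache sets depending on the virtual address used to access it. Mapping the scratch page at a kernel address with the same colour as the user address guarantees the kernel-side copy touches the same cache lines the user mapping will see. Below is a standalone sketch of the arithmetic, assuming 4KB pages and a 16KB cache way (SHMLBA = 4 * PAGE_SIZE, as on ARMv6); the user address and scratch window are hypothetical:

#include <stdio.h>

/* Sketch of the kernel's CACHE_COLOUR() arithmetic (the real macro is in
 * arch/arm/include/asm/shmparam.h).  With 4KB pages and a 16KB way,
 * bits [13:12] of a virtual address select the cache "colour". */
#define PAGE_SHIFT              12
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define SHMLBA                  (4 * PAGE_SIZE)
#define CACHE_COLOUR(vaddr)     (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
        unsigned long uaddr  = 0xb6f2d000UL;  /* hypothetical user mapping */
        unsigned long window = 0xffff8000UL;  /* hypothetical 4-page scratch window */

        /* Pick the scratch slot whose colour matches the user address,
         * just as v6_copy_user_highpage_aliasing() offsets COPYPAGE_V6_*. */
        unsigned long kaddr = window + (CACHE_COLOUR(uaddr) << PAGE_SHIFT);

        printf("colour %lu -> scratch alias %#lx\n", CACHE_COLOUR(uaddr), kaddr);
        return 0;
}

Because there is only one scratch slot per colour, concurrent users must be serialised, which is what the v6_lock raw spinlock in the diff protects.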
