Diffstat (limited to 'arch/mips/mm/highmem.c')
-rw-r--r--	arch/mips/mm/highmem.c	79
1 file changed, 49 insertions(+), 30 deletions(-)
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 10dd2af2343..da815d29523 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,8 +1,16 @@
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 
-void *__kmap(struct page *page)
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -14,15 +22,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -33,73 +42,80 @@ void __kunmap(struct page *page)
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	if (!pte_none(*(kmap_pte-idx)))
-		BUG();
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
 	local_flush_tlb_one((unsigned long)vaddr);
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic);
 
-void __kunmap_atomic(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type __maybe_unused;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
+	type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
+	kmap_atomic_idx_pop();
 	pagefault_enable();
 }
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
 	flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
@@ -112,8 +128,11 @@ struct page *__kmap_atomic_to_page(void *ptr)
 
 	return pte_page(*pte);
 }
 
-EXPORT_SYMBOL(__kmap);
-EXPORT_SYMBOL(__kunmap);
-EXPORT_SYMBOL(__kmap_atomic);
-EXPORT_SYMBOL(__kunmap_atomic);
-EXPORT_SYMBOL(__kmap_atomic_to_page);
+void __init kmap_init(void)
+{
+	unsigned long kmap_vstart;
+
+	/* cache the first kmap pte */
+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
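
A note on the conversion above: with the old API every caller had to name a fixmap slot explicitly (e.g. kmap_atomic(page, KM_USER0)); with the stacked API the slot index comes from a per-CPU stack via kmap_atomic_idx_push() and is released by kmap_atomic_idx_pop(), so nested atomic mappings must be torn down in reverse (LIFO) order. A minimal caller-side sketch, not part of this patch; it assumes the generic kunmap_atomic() wrapper of this kernel era, which resolves to the __kunmap_atomic() above:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy one highmem page to another. */
static void copy_highpage_sketch(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst);	/* pushes one per-CPU fixmap slot */
	void *vsrc = kmap_atomic(src);	/* pushes a second slot */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);		/* unmap in reverse (LIFO) order */
	kunmap_atomic(vdst);
}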

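Likewise, kmap_atomic_pfn() above covers physical frames that have no struct page behind them. A hedged usage sketch under the same assumptions (the helper name and offset handling are hypothetical, not from this patch):

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical helper: read a 32-bit word from a raw physical frame. */
static u32 read_pfn_word_sketch(unsigned long pfn, unsigned long offset)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);	/* offset must stay within one page */
	u32 val = vaddr[offset / sizeof(u32)];

	kunmap_atomic(vaddr);	/* a fixmap address, popped like any atomic kmap */
	return val;
}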