Diffstat (limited to 'arch/parisc/kernel/cache.c')
 arch/parisc/kernel/cache.c | 303 ++++++++++++++++++----------------
 1 file changed, 154 insertions(+), 149 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d054f3da3ff..f6448c7c62b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -22,17 +22,21 @@
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
+#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
@@ -67,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn, in which case there is no kernel dcache page to
+ flush. This occurs with the FireGL card in the C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
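
For reference, the new pfn_va() helper is pure address arithmetic: PFN_PHYS() shifts a page-frame number up into a physical address, and __va() adds the kernel's direct-map offset. A minimal user-space sketch of the composition, assuming 4 KiB pages and an illustrative direct-map offset (see asm/page.h for the real parisc value):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT	12			/* 4 KiB pages, as on parisc */
    #define PAGE_OFFSET	0x10000000UL		/* illustrative value only */

    #define PFN_PHYS(pfn)	((uintptr_t)(pfn) << PAGE_SHIFT)
    #define __va(paddr)	((void *)((uintptr_t)(paddr) + PAGE_OFFSET))

    /* Mirrors the patch's pfn_va(): kernel virtual address of a page frame. */
    #define pfn_va(pfn)	__va(PFN_PHYS(pfn))

    int main(void)
    {
    	unsigned long pfn = 0x1234;

    	/* 0x1234 << 12 = 0x1234000 physical, plus the direct-map offset */
    	printf("pfn %#lx -> va %p\n", pfn, pfn_va(pfn));
    	return 0;
    }
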
@@ -259,93 +272,24 @@ void disable_sr_hashing(void)
panic("SpaceID hashing is still on!\n");
}
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
- pmd_t *pmd;
- pte_t pte;
-
- if(pgd_none(*pgd))
- return 0;
-
- pmd = pmd_offset(pgd, addr);
- if(pmd_none(*pmd) || pmd_bad(*pmd))
- return 0;
-
- /* We cannot take the pte lock here: flush_cache_page is usually
- * called with pte lock already held. Whereas flush_dcache_page
- * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
- * the vma itself is secure, but the pte might come or go racily.
- */
- pte = *pte_offset_map(pmd, addr);
- /* But pte_unmap() does nothing on this architecture */
-
- /* Filter out coincidental file entries and swap entries */
- if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
- return 0;
-
- return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process. cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current. We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
- unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long physaddr)
{
- /* save the current process space and pgd */
- unsigned long space = mfsp(3), pgd = mfctl(25);
-
- /* we don't mind taking interrupts since they may not
- * do anything with user space, but we can't
- * be preempted here */
preempt_disable();
-
- /* make us current */
- mtctl(__pa(vma->vm_mm->pgd), 25);
- mtsp(vma->vm_mm->context, 3);
-
- flush_user_dcache_page(vmaddr);
- if(vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
-
- /* put the old current process back */
- mtsp(space, 3);
- mtctl(pgd, 25);
+ flush_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page_asm(physaddr, vmaddr);
preempt_enable();
}
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
- if (likely(vma->vm_mm->context == mfsp(3))) {
- flush_user_dcache_page(vmaddr);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
- } else {
- flush_user_cache_page_non_current(vma, vmaddr);
- }
-}
-
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt;
- struct prio_tree_iter iter;
unsigned long offset;
- unsigned long addr;
+ unsigned long addr, old_addr = 0;
pgoff_t pgoff;
- unsigned long pfn = page_to_pfn(page);
-
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
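
The removed helper above made a non-current process temporarily current by hijacking cr25 and %sr3 so that virtually-addressed flushes would translate; the replacement __flush_cache_page() takes the physical address and flushes through a temporary alias mapping instead, so no space-register juggling is needed. A stubbed sketch of the resulting calling convention (the printf stubs stand in for the real pacache.S routines, and reading preempt_disable() as pinning the task while a CPU-local alias translation is live is an inference, not something the patch states):

    #include <stdio.h>

    /* Illustrative stubs for the pacache.S entry points. */
    static void flush_dcache_page_asm(unsigned long phys, unsigned long vmaddr)
    {
    	printf("dcache: flush phys %#lx via alias coloured like %#lx\n",
    	       phys, vmaddr);
    }
    static void flush_icache_page_asm(unsigned long phys, unsigned long vmaddr)
    {
    	printf("icache: flush phys %#lx via alias coloured like %#lx\n",
    	       phys, vmaddr);
    }
    static void preempt_disable(void) { }	/* stay on this CPU while flushing */
    static void preempt_enable(void)  { }

    /* Shape of the new __flush_cache_page(): physical address plus the
     * user's virtual address (for cache colour), no mtsp()/mtctl() games. */
    static void flush_one(unsigned long vmaddr, unsigned long phys, int executable)
    {
    	preempt_disable();
    	flush_dcache_page_asm(phys, vmaddr);
    	if (executable)			/* VM_EXEC in the real code */
    		flush_icache_page_asm(phys, vmaddr);
    	preempt_enable();
    }

    int main(void)
    {
    	flush_one(0x40001000UL, 0x1234000UL, 1);
    	return 0;
    }
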
@@ -365,24 +309,26 @@ void flush_dcache_page(struct page *page)
* to flush one address here for them all to become coherent */
flush_dcache_mmap_lock(mapping);
- vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
- /* Flush instructions produce non access tlb misses.
- * On PA, we nullify these instructions rather than
- * taking a page fault if the pte doesn't exist.
- * This is just for speed. If the page translation
- * isn't there, there's no point exciting the
- * nadtlb handler into a nullification frenzy.
- *
- * Make sure we really have this page: the private
- * mappings may cover this area but have COW'd this
- * particular page.
- */
- if (translation_exists(mpnt, addr, pfn)) {
- __flush_cache_page(mpnt, addr);
- break;
+ /* The TLB is the engine of coherence on parisc: the
+ * CPU is entitled to speculate any page with a TLB
+ * mapping, so here we kill the mapping then flush the
+ * page along a special flush-only alias mapping.
+ * This guarantees that the page is no longer in the
+ * cache for any process, nor may it be
+ * speculatively read in (until the user or kernel
+ * specifically accesses it, of course). */
+
+ flush_tlb_page(mpnt, addr);
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
+ if (old_addr)
+ printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+ old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
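
The SHM_COLOUR test above is what lets flush_dcache_page() flush a single representative of each alias class: two user mappings of the same page are cache-equivalent when they are congruent modulo SHM_COLOUR, so flushing one cleans both. A runnable sketch of the congruence test, assuming the 4 MiB colour size that asm/shmparam.h (newly included at the top of this patch) is understood to define for parisc:

    #include <stdio.h>

    #define SHM_COLOUR	0x00400000UL	/* assumed 4 MiB alias granule */

    /* Two virtual addresses alias equivalently in the VIPT cache when
     * their offsets within a colour window are identical. */
    static int equivalent_alias(unsigned long a, unsigned long b)
    {
    	return (a & (SHM_COLOUR - 1)) == (b & (SHM_COLOUR - 1));
    }

    int main(void)
    {
    	/* Same offset in different colour windows: one flush covers both. */
    	printf("%d\n", equivalent_alias(0x40001000UL, 0x40401000UL));	/* 1 */
    	/* Different offsets: the "INEQUIVALENT ALIASES" case the printk flags. */
    	printf("%d\n", equivalent_alias(0x40001000UL, 0x40402000UL));	/* 0 */
    	return 0;
    }

Shared mappings are normally kept colour-aligned by the arch's get_unmapped_area, so the printk is there to flag mappings that violate that invariant.
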
@@ -395,17 +341,6 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
-void clear_user_page_asm(void *page, unsigned long vaddr)
-{
- unsigned long flags;
- /* This function is implemented in assembly in pacache.S */
- extern void __clear_user_page_asm(void *page, unsigned long vaddr);
-
- purge_tlb_start(flags);
- __clear_user_page_asm(page, vaddr);
- purge_tlb_end(flags);
-}
-
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
@@ -439,20 +374,9 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
-extern void purge_kernel_dcache_page(unsigned long);
-extern void clear_user_page_asm(void *page, unsigned long vaddr);
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
- unsigned long flags;
-
- purge_kernel_dcache_page((unsigned long)page);
- purge_tlb_start(flags);
- pdtlb_kernel(page);
- purge_tlb_end(flags);
- clear_user_page_asm(page, vaddr);
-}
-EXPORT_SYMBOL(clear_user_page);
+extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void clear_user_page_asm(void *, unsigned long);
+extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(void *addr)
{
@@ -466,24 +390,33 @@ void flush_kernel_dcache_page_addr(void *addr)
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
+ struct page *pg)
{
- /* no coherency needed (all in kmap/kunmap) */
- copy_user_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
+ /* Copy using kernel mapping. No coherency is needed (all in
+ kunmap) for the `to' page. However, the `from' page needs to
+ be flushed through a mapping equivalent to the user mapping
+ before it can be accessed through the kernel mapping. */
+ preempt_disable();
+ flush_dcache_page_asm(__pa(vfrom), vaddr);
+ preempt_enable();
+ copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
+void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
+ unsigned long flags;
+
+ /* Note: purge_tlb_entries can be called at startup with
+ no context. */
+
+ purge_tlb_start(flags);
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ pitlb(addr);
+ purge_tlb_end(flags);
}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
+EXPORT_SYMBOL(purge_tlb_entries);
void __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
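
The rewritten copy_user_page() above encodes an ordering rule: the `from' page must be flushed through a mapping congruent to the user's before it is read through the kernel mapping, after which a plain copy is coherent. A stubbed sketch of that ordering (flush_via_user_alias() and the memcpy stand in for flush_dcache_page_asm() and copy_page_asm(); the addresses are made up):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-in for flush_dcache_page_asm(): write back the user's dirty
     * lines through an alias coloured like the user mapping. */
    static void flush_via_user_alias(unsigned long phys, unsigned long user_vaddr)
    {
    	printf("flush phys %#lx via alias congruent to %#lx\n",
    	       phys, user_vaddr);
    }

    static void copy_user_page_sketch(void *vto, const void *vfrom,
    				  unsigned long user_vaddr,
    				  unsigned long phys_from)
    {
    	/* 1. Make the kernel's view of the source page current... */
    	flush_via_user_alias(phys_from, user_vaddr);
    	/* 2. ...then a straight kernel-mapping copy suffices. */
    	memcpy(vto, vfrom, PAGE_SIZE);
    }

    int main(void)
    {
    	static char from[PAGE_SIZE] = "hello", to[PAGE_SIZE];

    	copy_user_page_sketch(to, from, 0x40001000UL, 0x1234000UL);
    	printf("copied: %s\n", to);
    	return 0;
    }
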
@@ -496,8 +429,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
else {
unsigned long flags;
- mtsp(sid, 1);
purge_tlb_start(flags);
+ mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
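
The two-line change to __flush_tlb_range() above moves mtsp() inside the purge_tlb_start()/purge_tlb_end() critical section. The likely hazard being closed (an inference, not stated by the patch): sr1 is scratch state, and an interrupt-time purge such as the new purge_tlb_entries() could rewrite it between the old mtsp() and the pdtlb(), purging addresses in the wrong space. A toy model of the bug and the fix:

    #include <stdio.h>

    static unsigned long sr1;	/* models the scratch space register */
    static int irqs_off;

    static void purge_tlb_start(void) { irqs_off = 1; }	/* lock + irqs off */
    static void purge_tlb_end(void)   { irqs_off = 0; }
    static void mtsp(unsigned long s) { sr1 = s; }
    static void pdtlb(unsigned long a){ printf("purge %#lx in space %#lx\n", a, sr1); }

    /* An interrupt-driven purge may run whenever interrupts are enabled. */
    static void maybe_interrupt(void)
    {
    	if (!irqs_off)
    		mtsp(0xdead);	/* clobbers sr1 for its own purge */
    }

    static void flush_range_old(unsigned long sid, unsigned long addr)
    {
    	mtsp(sid);		/* sr1 = sid... */
    	maybe_interrupt();	/* ...clobbered in this window */
    	purge_tlb_start();
    	pdtlb(addr);		/* purges in space 0xdead: the bug */
    	purge_tlb_end();
    }

    static void flush_range_new(unsigned long sid, unsigned long addr)
    {
    	purge_tlb_start();
    	mtsp(sid);		/* set-and-use now atomic w.r.t. interrupts */
    	pdtlb(addr);		/* purges in space 0x123, as intended */
    	purge_tlb_end();
    }

    int main(void)
    {
    	flush_range_old(0x123, 0x1000);
    	flush_range_new(0x123, 0x1000);
    	return 0;
    }
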
@@ -524,13 +457,69 @@ void flush_cache_all(void)
on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
+static inline unsigned long mm_total_size(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ unsigned long usize = 0;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ usize += vma->vm_end - vma->vm_start;
+ return usize;
+}
+
+static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
+{
+ pte_t *ptep = NULL;
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd))
+ ptep = pte_offset_map(pmd, addr);
+ }
+ }
+ return ptep;
+}
+
void flush_cache_mm(struct mm_struct *mm)
{
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
+ /* Flushing the whole cache on each cpu takes forever on
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ }
+ return;
+ }
+
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
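
flush_cache_mm() now picks its strategy by footprint: above parisc_cache_flush_threshold a whole-cache flush is cheaper than walking, below it the VMAs are flushed page by page. A user-space sketch of the decision, using the patch's 0.5 MB default:

    #include <stdio.h>

    #define FLUSH_THRESHOLD	0x80000UL	/* 0.5 MB, the patch's default */

    struct vma { unsigned long start, end; struct vma *next; };

    /* Mirrors mm_total_size(): total bytes mapped across all VMAs. */
    static unsigned long mm_total_size(const struct vma *mmap)
    {
    	unsigned long usize = 0;

    	for (const struct vma *v = mmap; v; v = v->next)
    		usize += v->end - v->start;
    	return usize;
    }

    int main(void)
    {
    	struct vma text = { 0x00001000, 0x00021000, NULL  };	/* 128 KiB */
    	struct vma heap = { 0x00400000, 0x00440000, &text };	/* 256 KiB */

    	/* 384 KiB < 512 KiB: flush per page rather than the whole
    	 * cache on every CPU (the rp3440 case the comment mentions). */
    	if (mm_total_size(&heap) >= FLUSH_THRESHOLD)
    		printf("flush_cache_all()\n");
    	else
    		printf("walk VMAs, __flush_cache_page() per present pte\n");
    	return 0;
    }
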
@@ -551,20 +540,35 @@ flush_user_icache_range(unsigned long start, unsigned long end)
flush_instruction_cache();
}
-
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- int sr3;
+ unsigned long addr;
+ pgd_t *pgd;
BUG_ON(!vma->vm_mm->context);
- sr3 = mfsp(3);
- if (vma->vm_mm->context == sr3) {
- flush_user_dcache_range(start,end);
- flush_user_icache_range(start,end);
- } else {
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
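
Both flush_cache_mm() and flush_cache_range() above depend on get_ptep(), a software walk that bails out at the first absent level so that only pages with a live translation are flushed. A toy two-level version of the same descend-or-bail pattern (the real walk descends pgd, pud, pmd; the table geometry here is invented for the demo):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT	12
    #define PTRS_PER_PTE	1024

    typedef unsigned long pte_t;

    /* Descend or bail, level by level, like the patch's get_ptep(). */
    static pte_t *get_ptep_toy(pte_t **top, unsigned long addr)
    {
    	pte_t *table = top[addr >> (PAGE_SHIFT + 10)];

    	if (!table)
    		return NULL;	/* no translation: nothing to flush */
    	return &table[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
    }

    int main(void)
    {
    	pte_t **top = calloc(4, sizeof(*top));

    	top[0] = calloc(PTRS_PER_PTE, sizeof(pte_t));
    	top[0][1] = 0x1234;	/* pretend pte for address 0x1000 */

    	for (unsigned long addr = 0; addr < 0x3000; addr += 1UL << PAGE_SHIFT) {
    		pte_t *ptep = get_ptep_toy(top, addr);

    		if (!ptep || !*ptep)
    			continue;	/* mirrors the !ptep / !pfn_valid skips */
    		printf("flush page at %#lx (pte %#lx)\n", addr, *ptep);
    	}
    	free(top[0]);
    	free(top);
    	return 0;
    }
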
@@ -573,7 +577,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- if (likely(translation_exists(vma, vmaddr, pfn)))
- __flush_cache_page(vma, vmaddr);
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}