author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/mm
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/Makefile          |   44
 arch/mips/mm/c-r3k.c           |  349
 arch/mips/mm/c-r4k.c           | 1260
 arch/mips/mm/c-sb1.c           |  558
 arch/mips/mm/c-tx39.c          |  493
 arch/mips/mm/cache.c           |  157
 arch/mips/mm/cerr-sb1.c        |  543
 arch/mips/mm/cex-gen.S         |   42
 arch/mips/mm/cex-sb1.S         |  170
 arch/mips/mm/dma-coherent.c    |  255
 arch/mips/mm/dma-ip27.c        |  257
 arch/mips/mm/dma-ip32.c        |  382
 arch/mips/mm/dma-noncoherent.c |  400
 arch/mips/mm/extable.c         |   21
 arch/mips/mm/fault.c           |  236
 arch/mips/mm/highmem.c         |  103
 arch/mips/mm/init.c            |  304
 arch/mips/mm/ioremap.c         |  202
 arch/mips/mm/pg-r4k.c          |  489
 arch/mips/mm/pg-sb1.c          |  287
 arch/mips/mm/pgtable-32.c      |   97
 arch/mips/mm/pgtable-64.c      |   58
 arch/mips/mm/pgtable.c         |   36
 arch/mips/mm/sc-ip22.c         |  177
 arch/mips/mm/sc-r5k.c          |  108
 arch/mips/mm/sc-rm7k.c         |  193
 arch/mips/mm/tlb-andes.c       |  257
 arch/mips/mm/tlb-r3k.c         |  289
 arch/mips/mm/tlb-r4k.c         |  419
 arch/mips/mm/tlb-r8k.c         |  250
 arch/mips/mm/tlb-sb1.c         |  376
 arch/mips/mm/tlbex-fault.S     |   28
 arch/mips/mm/tlbex.c           | 1815
 33 files changed, 10655 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
new file mode 100644
index 00000000000..f61e038b444
--- /dev/null
+++ b/arch/mips/mm/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile for the Linux/MIPS-specific parts of the memory manager.
+#
+
+obj-y += cache.o extable.o fault.o init.o pgtable.o \
+ tlbex.o tlbex-fault.o
+
+obj-$(CONFIG_MIPS32) += ioremap.o pgtable-32.o
+obj-$(CONFIG_MIPS64) += pgtable-64.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+
+obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_MIPS64) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_NEVADA) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_R10000) += c-r4k.o cex-gen.o pg-r4k.o tlb-andes.o
+obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o pg-r4k.o
+obj-$(CONFIG_CPU_R4300) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_R4X00) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5432) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o
+obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_SB1) += c-sb1.o cerr-sb1.o cex-sb1.o pg-sb1.o \
+ tlb-sb1.o
+obj-$(CONFIG_CPU_TX39XX) += c-tx39.o pg-r4k.o tlb-r3k.o
+obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+
+obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+
+#
+# Choose one DMA coherency model
+#
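+# Platforms that select CONFIG_OWN_DMA supply their own DMA code, so
+# neither generic model is built for them.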
+ifndef CONFIG_OWN_DMA
+obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
+endif
+obj-$(CONFIG_DMA_IP27) += dma-ip27.o
+obj-$(CONFIG_DMA_IP32) += dma-ip32.o
+
+EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
new file mode 100644
index 00000000000..c659f99eb39
--- /dev/null
+++ b/arch/mips/mm/c-r3k.c
@@ -0,0 +1,349 @@
+/*
+ * c-r3k.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2001, 2004 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+static unsigned long icache_size, dcache_size; /* Size in bytes */
+static unsigned long icache_lsize, dcache_lsize;	/* Line size in bytes */
+
+#undef DEBUG_CACHE
+
+unsigned long __init r3k_cache_size(unsigned long ca_flags)
+{
+ unsigned long flags, status, dummy, size;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
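+	/*
+	 * With the cache isolated, loads and stores touch only the cache
+	 * arrays and ST0_CM flags a miss on the last load.  If the test
+	 * pattern below does not read back intact, there is no cache to
+	 * size at all.
+	 */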
+ *p = 0xa5a55a5a;
+ dummy = *p;
+ status = read_c0_status();
+
+ if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
+ size = 0;
+ } else {
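+		/*
+		 * Zero one word at each power-of-two offset.  Offsets at
+		 * or beyond the cache size alias to index 0, so writing
+		 * -1 through *p clobbers those zeroes; the scan then
+		 * stops at the first aliased offset, which is the cache
+		 * size in words.
+		 */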
+ for (size = 128; size <= 0x40000; size <<= 1)
+ *(p + size) = 0;
+ *p = -1;
+ for (size = 128;
+ (size <= 0x40000) && (*(p + size) == 0);
+ size <<= 1)
+ ;
+ if (size > 0x40000)
+ size = 0;
+ }
+
+ write_c0_status(flags);
+
+ return size * sizeof(*p);
+}
+
+unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
+{
+ unsigned long flags, status, lsize, i;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
+ for (i = 0; i < 128; i++)
+ *(p + i) = 0;
+ *(volatile unsigned char *)p = 0;
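+	/*
+	 * A sub-word store with the cache isolated invalidates the whole
+	 * line containing it, so the loads below keep missing (ST0_CM
+	 * set) until the first offset that falls into the next,
+	 * still-valid line.
+	 */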
+ for (lsize = 1; lsize < 128; lsize <<= 1) {
+ *(p + lsize);
+ status = read_c0_status();
+ if (!(status & ST0_CM))
+ break;
+ }
+ for (i = 0; i < 128; i += lsize)
+ *(volatile unsigned char *)(p + i) = 0;
+
+ write_c0_status(flags);
+
+ return lsize * sizeof(*p);
+}
+
+static void __init r3k_probe_cache(void)
+{
+ dcache_size = r3k_cache_size(ST0_ISC);
+ if (dcache_size)
+ dcache_lsize = r3k_cache_lsize(ST0_ISC);
+
+ icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
+ if (icache_size)
+ icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
+}
+
+static void r3k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > icache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = icache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
+
+ for (i = 0; i < size; i += 0x080) {
+ asm ( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > dcache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = dcache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|flags)&~ST0_IEC);
+
+ for (i = 0; i < size; i += 0x080) {
+ asm ( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static inline unsigned long get_phys_page (unsigned long addr,
+ struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long physpage;
+
+ pgd = pgd_offset(mm, addr);
+ pmd = pmd_offset(pgd, addr);
+ pte = pte_offset(pmd, addr);
+
+ if ((physpage = pte_val(*pte)) & _PAGE_VALID)
+ return KSEG0ADDR(physpage & PAGE_MASK);
+
+ return 0;
+}
+
+static inline void r3k_flush_cache_all(void)
+{
+}
+
+static inline void r3k___flush_cache_all(void)
+{
+ r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
+ r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
+}
+
+static void r3k_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static void r3k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static void r3k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+{
+}
+
+static void r3k_flush_data_cache_page(unsigned long addr)
+{
+}
+
+static void r3k_flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physpage;
+
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+#ifdef DEBUG_CACHE
+ printk("cpage[%d,%08lx]", cpu_context(smp_processor_id(), mm), page);
+#endif
+
+ physpage = (unsigned long) page_address(page);
+ if (physpage)
+ r3k_flush_icache_range(physpage, physpage + PAGE_SIZE);
+}
+
+static void r3k_flush_cache_sigtramp(unsigned long addr)
+{
+ unsigned long flags;
+
+#ifdef DEBUG_CACHE
+ printk("csigtramp[%08lx]", addr);
+#endif
+
+ flags = read_c0_status();
+
+ write_c0_status(flags&~ST0_IEC);
+
+ /* Fill the TLB to avoid an exception with caches isolated. */
+ asm ( "lw\t$0, 0x000(%0)\n\t"
+ "lw\t$0, 0x004(%0)\n\t"
+ : : "r" (addr) );
+
+ write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
+
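+	/*
+	 * With the caches isolated and swapped, the sub-word stores below
+	 * invalidate the two I-cache lines holding the trampoline.
+	 */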
+ asm ( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ : : "r" (addr) );
+
+ write_c0_status(flags);
+}
+
+static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ r3k_flush_dcache_range(start, start + size);
+}
+
+void __init ld_mmu_r23000(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+
+ r3k_probe_cache();
+
+ flush_cache_all = r3k_flush_cache_all;
+ __flush_cache_all = r3k___flush_cache_all;
+ flush_cache_mm = r3k_flush_cache_mm;
+ flush_cache_range = r3k_flush_cache_range;
+ flush_cache_page = r3k_flush_cache_page;
+ flush_icache_page = r3k_flush_icache_page;
+ flush_icache_range = r3k_flush_icache_range;
+
+ flush_cache_sigtramp = r3k_flush_cache_sigtramp;
+ flush_data_cache_page = r3k_flush_data_cache_page;
+
+ _dma_cache_wback_inv = r3k_dma_cache_wback_inv;
+ _dma_cache_wback = r3k_dma_cache_wback_inv;
+ _dma_cache_inv = r3k_dma_cache_wback_inv;
+
+ printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
+ icache_size >> 10, icache_lsize);
+ printk("Primary data cache %ldkB, linesize %ld bytes.\n",
+ dcache_size >> 10, dcache_lsize);
+
+ build_clear_page();
+ build_copy_page();
+}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
new file mode 100644
index 00000000000..a03ebb2cba6
--- /dev/null
+++ b/arch/mips/mm/c-r4k.c
@@ -0,0 +1,1260 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/r4kcache.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+
+static unsigned long icache_size, dcache_size, scache_size;
+
+/*
+ * Dummy cache handling routines for machines without board caches.
+ */
+static void no_sc_noop(void) {}
+
+static struct bcache_ops no_sc_ops = {
+ .bc_enable = (void *)no_sc_noop,
+ .bc_disable = (void *)no_sc_noop,
+ .bc_wback_inv = (void *)no_sc_noop,
+ .bc_inv = (void *)no_sc_noop
+};
+
+struct bcache_ops *bcops = &no_sc_ops;
+
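+/*
+ * A PRId implementation field of 0x20 identifies the R4600; masking off
+ * the low nibble compares the major revision (1.x vs. 2.x) and ignores
+ * the minor revision.
+ */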
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x2020)
+
+#define R4600_HIT_CACHEOP_WAR_IMPL \
+do { \
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
+ *(volatile unsigned long *)CKSEG1; \
+ if (R4600_V1_HIT_CACHEOP_WAR) \
+ __asm__ __volatile__("nop;nop;nop;nop"); \
+} while (0)
+
+static void (*r4k_blast_dcache_page)(unsigned long addr);
+
+static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
+{
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache32_page(addr);
+}
+
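+/*
+ * The blast handlers are chosen once, at setup time, to match the probed
+ * cache line size, so the hot paths pay one indirect call instead of a
+ * size check per invocation.  The same pattern repeats for each hook
+ * below.
+ */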
+static inline void r4k_blast_dcache_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 16)
+ r4k_blast_dcache_page = blast_dcache16_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+}
+
+static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
+
+static inline void r4k_blast_dcache_page_indexed_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 16)
+ r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+}
+
+static void (* r4k_blast_dcache)(void);
+
+static inline void r4k_blast_dcache_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 16)
+ r4k_blast_dcache = blast_dcache16;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache = blast_dcache32;
+}
+
+/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
+#define JUMP_TO_ALIGN(order) \
+ __asm__ __volatile__( \
+ "b\t1f\n\t" \
+ ".align\t" #order "\n\t" \
+ "1:\n\t" \
+ )
+#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+
+static inline void blast_r4600_v1_icache32(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32();
+ local_irq_restore(flags);
+}
+
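+/*
+ * The TX49 cannot safely index-invalidate I-cache lines it is currently
+ * executing from.  JUMP_TO_ALIGN parks this loop at a known 1kB parity,
+ * so it blasts the chunks of the opposite parity first and only then
+ * the ones it may itself occupy.
+ */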
+static inline void tx49_blast_icache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.icache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+}
+
+static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32_page_indexed(page);
+ local_irq_restore(flags);
+}
+
+static inline void tx49_blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+}
+
+static void (* r4k_blast_icache_page)(unsigned long addr);
+
+static inline void r4k_blast_icache_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 16)
+ r4k_blast_icache_page = blast_icache16_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_page = blast_icache32_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_page = blast_icache64_page;
+}
+
+
+static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
+
+static inline void r4k_blast_icache_page_indexed_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 16)
+ r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
+ else if (ic_lsize == 32) {
+ if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache_page_indexed =
+ tx49_blast_icache32_page_indexed;
+ else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ r4k_blast_icache_page_indexed =
+ blast_icache32_r4600_v1_page_indexed;
+ else
+ r4k_blast_icache_page_indexed =
+ blast_icache32_page_indexed;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
+}
+
+static void (* r4k_blast_icache)(void);
+
+static inline void r4k_blast_icache_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 16)
+ r4k_blast_icache = blast_icache16;
+ else if (ic_lsize == 32) {
+ if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ r4k_blast_icache = blast_r4600_v1_icache32;
+ else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache = tx49_blast_icache32;
+ else
+ r4k_blast_icache = blast_icache32;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache = blast_icache64;
+}
+
+static void (* r4k_blast_scache_page)(unsigned long addr);
+
+static inline void r4k_blast_scache_page_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (sc_lsize == 16)
+ r4k_blast_scache_page = blast_scache16_page;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page = blast_scache32_page;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page = blast_scache64_page;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page = blast_scache128_page;
+}
+
+static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
+
+static inline void r4k_blast_scache_page_indexed_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (sc_lsize == 16)
+ r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
+}
+
+static void (* r4k_blast_scache)(void);
+
+static inline void r4k_blast_scache_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (sc_lsize == 16)
+ r4k_blast_scache = blast_scache16;
+ else if (sc_lsize == 32)
+ r4k_blast_scache = blast_scache32;
+ else if (sc_lsize == 64)
+ r4k_blast_scache = blast_scache64;
+ else if (sc_lsize == 128)
+ r4k_blast_scache = blast_scache128;
+}
+
+/*
+ * This is the former mm flush_cache_all(), which really should be
+ * flush_cache_vunmap these days ...
+ */
+static inline void local_r4k_flush_cache_all(void * args)
+{
+ r4k_blast_dcache();
+ r4k_blast_icache();
+}
+
+static void r4k_flush_cache_all(void)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+}
+
+static inline void local_r4k___flush_cache_all(void * args)
+{
+ r4k_blast_dcache();
+ r4k_blast_icache();
+
+ switch (current_cpu_data.cputype) {
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R10000:
+ case CPU_R12000:
+ r4k_blast_scache();
+ }
+}
+
+static void r4k___flush_cache_all(void)
+{
+ on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+}
+
+static inline void local_r4k_flush_cache_range(void * args)
+{
+ struct vm_area_struct *vma = args;
+ int exec;
+
+ if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
+ return;
+
+ exec = vma->vm_flags & VM_EXEC;
+ if (cpu_has_dc_aliases || exec)
+ r4k_blast_dcache();
+ if (exec)
+ r4k_blast_icache();
+}
+
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+}
+
+static inline void local_r4k_flush_cache_mm(void * args)
+{
+ struct mm_struct *mm = args;
+
+ if (!cpu_context(smp_processor_id(), mm))
+ return;
+
+ r4k_blast_dcache();
+ r4k_blast_icache();
+
+ /*
+	 * Kludge alert.  For obscure reasons the R4000SC and R4400SC go
+	 * nuts if we only flush the primary caches, while the R10000 and
+	 * R12000 behave sanely ...
+ */
+ if (current_cpu_data.cputype == CPU_R4000SC ||
+ current_cpu_data.cputype == CPU_R4000MC ||
+ current_cpu_data.cputype == CPU_R4400SC ||
+ current_cpu_data.cputype == CPU_R4400MC)
+ r4k_blast_scache();
+}
+
+static void r4k_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+}
+
+struct flush_cache_page_args {
+ struct vm_area_struct *vma;
+ unsigned long page;
+};
+
+static inline void local_r4k_flush_cache_page(void *args)
+{
+ struct flush_cache_page_args *fcp_args = args;
+ struct vm_area_struct *vma = fcp_args->vma;
+ unsigned long page = fcp_args->page;
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ page &= PAGE_MASK;
+ pgdp = pgd_offset(mm, page);
+ pmdp = pmd_offset(pgdp, page);
+ ptep = pte_offset(pmdp, page);
+
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
+ * in the cache.
+ */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ /*
+	 * Doing flushes for an ASID other than the current one is too
+	 * difficult, since the R4k caches do a TLB translation for every
+	 * cache flush operation.  So we do indexed flushes in that case,
+	 * which doesn't over-flush the cache.
+ */
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ r4k_blast_dcache_page(page);
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page(page);
+ }
+ if (exec)
+ r4k_blast_icache_page(page);
+
+ return;
+ }
+
+ /*
+	 * Do an indexed flush; it is too much work to get the (possible)
+	 * TLB refills to work correctly.
+ */
+ page = INDEX_BASE + (page & (dcache_size - 1));
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ r4k_blast_dcache_page_indexed(page);
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page_indexed(page);
+ }
+ if (exec) {
+ if (cpu_has_vtag_icache) {
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, vma->vm_mm) != 0)
+ drop_mmu_context(vma->vm_mm, cpu);
+ } else
+ r4k_blast_icache_page_indexed(page);
+ }
+}
+
+static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+{
+ struct flush_cache_page_args args;
+
+ /*
+	 * If the mm owns no valid ASID yet, it cannot possibly have
+	 * gotten this page into the cache.
+ */
+ if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
+ return;
+
+ args.vma = vma;
+ args.page = page;
+
+ on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+}
+
+static inline void local_r4k_flush_data_cache_page(void * addr)
+{
+ r4k_blast_dcache_page((unsigned long) addr);
+}
+
+static void r4k_flush_data_cache_page(unsigned long addr)
+{
+ on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+}
+
+struct flush_icache_range_args {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void local_r4k_flush_icache_range(void *args)
+{
+ struct flush_icache_range_args *fir_args = args;
+ unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long ic_lsize = current_cpu_data.icache.linesz;
+ unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long start = fir_args->start;
+ unsigned long end = fir_args->end;
+ unsigned long addr, aend;
+
+ if (!cpu_has_ic_fills_f_dc) {
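+		/*
+		 * The I-cache refills from memory rather than from the
+		 * D-cache, so freshly written code must first be pushed
+		 * out of the D-cache (and out of the S-cache when the
+		 * I-cache does not snoop remote stores) before the stale
+		 * I-cache lines are invalidated below.
+		 */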
+ if (end - start > dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ addr = start & ~(dc_lsize - 1);
+ aend = (end - 1) & ~(dc_lsize - 1);
+
+ while (1) {
+ /* Hit_Writeback_Inv_D */
+ protected_writeback_dcache_line(addr);
+ if (addr == aend)
+ break;
+ addr += dc_lsize;
+ }
+ }
+
+ if (!cpu_icache_snoops_remote_store) {
+ if (end - start > scache_size) {
+ r4k_blast_scache();
+ } else {
+ addr = start & ~(sc_lsize - 1);
+ aend = (end - 1) & ~(sc_lsize - 1);
+
+ while (1) {
+ /* Hit_Writeback_Inv_D */
+ protected_writeback_scache_line(addr);
+ if (addr == aend)
+ break;
+ addr += sc_lsize;
+ }
+ }
+ }
+ }
+
+ if (end - start > icache_size)
+ r4k_blast_icache();
+ else {
+ addr = start & ~(ic_lsize - 1);
+ aend = (end - 1) & ~(ic_lsize - 1);
+ while (1) {
+ /* H