Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig          |  29
-rw-r--r--  arch/sh/mm/Makefile_32      |  11
-rw-r--r--  arch/sh/mm/cache-debugfs.c  |   8
-rw-r--r--  arch/sh/mm/cache-sh2.c      |  45
-rw-r--r--  arch/sh/mm/cache-sh2a.c     | 129
-rw-r--r--  arch/sh/mm/cache-sh4.c      |  69
-rw-r--r--  arch/sh/mm/consistent.c     | 167
-rw-r--r--  arch/sh/mm/fault_32.c       |  75
-rw-r--r--  arch/sh/mm/fault_64.c       |   2
-rw-r--r--  arch/sh/mm/hugetlbpage.c    |   8
-rw-r--r--  arch/sh/mm/init.c           | 100
-rw-r--r--  arch/sh/mm/numa.c           |   5
-rw-r--r--  arch/sh/mm/pg-nommu.c       |   1
-rw-r--r--  arch/sh/mm/pg-sh4.c         |   2
-rw-r--r--  arch/sh/mm/pg-sh7705.c      |   2
-rw-r--r--  arch/sh/mm/pmb.c            |   6
-rw-r--r--  arch/sh/mm/tlb-nommu.c      |   1
-rw-r--r--  arch/sh/mm/tlb-sh5.c        |  20
18 files changed, 424 insertions, 256 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 5fd218430b1..555ec9714b9 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -132,7 +132,11 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
- depends on SPARSEMEM
+ depends on SPARSEMEM && MMU
+
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+ def_bool y
+ depends on SPARSEMEM && MMU
config ARCH_MEMORY_PROBE
def_bool y
@@ -145,32 +149,48 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
- depends on !X2TLB
+ depends on !MMU || !X2TLB
help
This is the default page size used by all SuperH CPUs.
config PAGE_SIZE_8KB
bool "8kB"
- depends on X2TLB
+ depends on !MMU || X2TLB
help
This enables 8kB pages as supported by SH-X2 and later MMUs.
+config PAGE_SIZE_16KB
+ bool "16kB"
+ depends on !MMU
+ help
+ This enables 16kB pages on MMU-less SH systems.
+
config PAGE_SIZE_64KB
bool "64kB"
- depends on CPU_SH4 || CPU_SH5
+ depends on !MMU || CPU_SH4 || CPU_SH5
help
This enables support for 64kB pages, possible on all SH-4
CPUs and later.
endchoice
+config ENTRY_OFFSET
+ hex
+ default "0x00001000" if PAGE_SIZE_4KB
+ default "0x00002000" if PAGE_SIZE_8KB
+ default "0x00004000" if PAGE_SIZE_16KB
+ default "0x00010000" if PAGE_SIZE_64KB
+ default "0x00000000"
+
choice
prompt "HugeTLB page size"
depends on HUGETLB_PAGE && (CPU_SH4 || CPU_SH5) && MMU
+ default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB
default HUGETLB_PAGE_SIZE_64K
config HUGETLB_PAGE_SIZE_64K
bool "64kB"
+ depends on !PAGE_SIZE_64KB
config HUGETLB_PAGE_SIZE_256K
bool "256kB"
@@ -223,7 +243,6 @@ choice
config CACHE_WRITEBACK
bool "Write-back"
- depends on CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
config CACHE_WRITETHROUGH
bool "Write-through"
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index e295db60b91..70e0906023c 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -5,12 +5,15 @@
obj-y := init.o extable_32.o consistent.o
ifndef CONFIG_CACHE_OFF
-obj-$(CONFIG_CPU_SH2) += cache-sh2.o
-obj-$(CONFIG_CPU_SH3) += cache-sh3.o
-obj-$(CONFIG_CPU_SH4) += cache-sh4.o
-obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+cache-$(CONFIG_CPU_SH2) := cache-sh2.o
+cache-$(CONFIG_CPU_SH2A) := cache-sh2a.o
+cache-$(CONFIG_CPU_SH3) := cache-sh3.o
+cache-$(CONFIG_CPU_SH4) := cache-sh4.o
+cache-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
endif
+obj-y += $(cache-y)
+
mmu-y := tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index c5b56d52b7d..5ba067b2659 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -120,7 +120,7 @@ static const struct file_operations cache_debugfs_fops = {
.open = cache_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
static int __init cache_debugfs_init(void)
@@ -130,12 +130,18 @@ static int __init cache_debugfs_init(void)
dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
(unsigned int *)CACHE_TYPE_DCACHE,
&cache_debugfs_fops);
+ if (!dcache_dentry)
+ return -ENOMEM;
if (IS_ERR(dcache_dentry))
return PTR_ERR(dcache_dentry);
icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
(unsigned int *)CACHE_TYPE_ICACHE,
&cache_debugfs_fops);
+ if (!icache_dentry) {
+ debugfs_remove(dcache_dentry);
+ return -ENOMEM;
+ }
if (IS_ERR(icache_dentry)) {
debugfs_remove(dcache_dentry);
return PTR_ERR(icache_dentry);
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 6614033f6be..c4e80d2b764 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -2,6 +2,7 @@
* arch/sh/mm/cache-sh2.c
*
* Copyright (C) 2002 Paul Mundt
+ * Copyright (C) 2008 Yoshinori Sato
*
* Released under the terms of the GNU GPL v2.0.
*/
@@ -24,8 +25,15 @@ void __flush_wback_region(void *start, int size)
end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
& ~(L1_CACHE_BYTES-1);
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
- /* FIXME cache purge */
- ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+ unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
+ int way;
+ for (way = 0; way < 4; way++) {
+ unsigned long data = ctrl_inl(addr | (way << 12));
+ if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ ctrl_outl(data, addr | (way << 12));
+ }
+ }
}
}
@@ -37,21 +45,40 @@ void __flush_purge_region(void *start, int size)
begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
& ~(L1_CACHE_BYTES-1);
- for (v = begin; v < end; v+=L1_CACHE_BYTES) {
- ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
- }
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES)
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
}
void __flush_invalidate_region(void *start, int size)
{
+#ifdef CONFIG_CACHE_WRITEBACK
+ /*
+ * SH-2 does not support individual line invalidation, only a
+ * global invalidate.
+ */
+ unsigned long ccr;
+ unsigned long flags;
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ ccr = ctrl_inl(CCR);
+ ccr |= CCR_CACHE_INVALIDATE;
+ ctrl_outl(ccr, CCR);
+
+ back_to_cached();
+ local_irq_restore(flags);
+#else
unsigned long v;
unsigned long begin, end;
begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
& ~(L1_CACHE_BYTES-1);
- for (v = begin; v < end; v+=L1_CACHE_BYTES) {
- ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
- }
-}
+ for (v = begin; v < end; v+=L1_CACHE_BYTES)
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
+#endif
+}
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
new file mode 100644
index 00000000000..62c0c5f3512
--- /dev/null
+++ b/arch/sh/mm/cache-sh2a.c
@@ -0,0 +1,129 @@
+/*
+ * arch/sh/mm/cache-sh2a.c
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+void __flush_wback_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0);
+ int way;
+ for (way = 0; way < 4; way++) {
+ unsigned long data = ctrl_inl(addr | (way << 11));
+ if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ ctrl_outl(data, addr | (way << 11));
+ }
+ }
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+void __flush_purge_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ }
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ local_irq_save(flags);
+ jump_to_uncached();
+
+#ifdef CONFIG_CACHE_WRITEBACK
+ ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+ /* I-cache invalidate */
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_IC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ }
+#else
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_IC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ ctrl_outl((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ }
+#endif
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+/* WBack O-Cache and flush I-Cache */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long v;
+ unsigned long flags;
+
+ start = start & ~(L1_CACHE_BYTES-1);
+ end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = start; v < end; v+=L1_CACHE_BYTES) {
+ unsigned long addr = (v & 0x000007f0);
+ int way;
+ /* O-Cache writeback */
+ for (way = 0; way < 4; way++) {
+ unsigned long data = ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+ if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+ }
+ }
+ /* I-Cache invalidate */
+ ctrl_outl(addr,
+ CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 43d7ff6b6ec..5cfe08dbb59 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -4,6 +4,7 @@
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2001 - 2007 Paul Mundt
* Copyright (C) 2003 Richard Curnow
+ * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -22,6 +23,7 @@
* entirety.
*/
#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
+#define MAX_ICACHE_PAGES 32
static void __flush_dcache_segment_1way(unsigned long start,
unsigned long extent);
@@ -178,42 +180,45 @@ void __flush_invalidate_region(void *start, int size)
/*
* Write back the range of D-cache, and purge the I-cache.
*
- * Called from kernel/module.c:sys_init_module and routine for a.out format.
+ * Called from kernel/module.c:sys_init_module and routine for a.out format,
+ * signal handler code and kprobes code
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
- flush_cache_all();
-}
-
-/*
- * Write back the D-cache and purge the I-cache for signal trampoline.
- * .. which happens to be the same behavior as flush_icache_range().
- * So, we simply flush out a line.
- */
-void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
-{
- unsigned long v, index;
- unsigned long flags;
+ int icacheaddr;
+ unsigned long flags, v;
int i;
- v = addr & ~(L1_CACHE_BYTES-1);
- asm volatile("ocbwb %0"
- : /* no output */
- : "m" (__m(v)));
-
- index = CACHE_IC_ADDRESS_ARRAY |
- (v & boot_cpu_data.icache.entry_mask);
-
- local_irq_save(flags);
- jump_to_uncached();
-
- for (i = 0; i < boot_cpu_data.icache.ways;
- i++, index += boot_cpu_data.icache.way_incr)
- ctrl_outl(0, index); /* Clear out Valid-bit */
-
- back_to_cached();
- wmb();
- local_irq_restore(flags);
+ /* If there are too many pages then just blow the caches */
+ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+ flush_cache_all();
+ } else {
+ /* selectively flush d-cache then invalidate the i-cache */
+ /* this is inefficient, so only use for small ranges */
+ start &= ~(L1_CACHE_BYTES-1);
+ end += L1_CACHE_BYTES-1;
+ end &= ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = start; v < end; v+=L1_CACHE_BYTES) {
+ asm volatile("ocbwb %0"
+ : /* no output */
+ : "m" (__m(v)));
+
+ icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
+ v & cpu_data->icache.entry_mask);
+
+ for (i = 0; i < cpu_data->icache.ways;
+ i++, icacheaddr += cpu_data->icache.way_incr)
+ /* Clear i-cache line valid-bit */
+ ctrl_outl(0, icacheaddr);
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+ }
}
static inline void flush_cache_4096(unsigned long start,
@@ -256,7 +261,7 @@ void flush_dcache_page(struct page *page)
}
/* TODO: Selective icache invalidation through IC address array.. */
-static inline void __uses_jump_to_uncached flush_icache_all(void)
+static void __uses_jump_to_uncached flush_icache_all(void)
{
unsigned long flags, ccr;
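
The rewritten flush_icache_range() above trades precision against cost: past MAX_ICACHE_PAGES (32 pages, i.e. 128 kB with 4 kB pages) a global flush is cheaper than writing back and invalidating every line in every way. A condensed sketch of that decision, with the selective path folded into a hypothetical helper:

    #include <asm/page.h>
    #include <asm/cacheflush.h>

    /*
     * Sketch of the size heuristic only; flush_small_range() is a
     * hypothetical stand-in for the per-line ocbwb + IC address-array
     * loop in the patch.
     */
    static void flush_icache_range_sketch(unsigned long start, unsigned long end)
    {
            if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES)
                    flush_cache_all();             /* large range: blow the caches */
            else
                    flush_small_range(start, end); /* small range: selective flush */
    }
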
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index d3c33fc5b1c..9f8ea3ada4d 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -10,38 +10,20 @@
* for more details.
*/
#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>
-struct dma_coherent_mem {
- void *virt_base;
- u32 device_base;
- int size;
- int flags;
- unsigned long *bitmap;
-};
-
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret, *ret_nocache;
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
- if (mem) {
- int page = bitmap_find_free_region(mem->bitmap, mem->size,
- order);
- if (page >= 0) {
- *dma_handle = mem->device_base + (page << PAGE_SHIFT);
- ret = mem->virt_base + (page << PAGE_SHIFT);
- memset(ret, 0, size);
- return ret;
- }
- if (mem->flags & DMA_MEMORY_EXCLUSIVE)
- return NULL;
- }
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
ret = (void *)__get_free_pages(gfp, order);
if (!ret)
@@ -54,7 +36,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
*/
dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
- ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+ ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
if (!ret_nocache) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -68,99 +50,16 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
- if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
- bitmap_release_region(mem->bitmap, page, order);
- } else {
+ if (!dma_release_from_coherent(dev, order, vaddr)) {
WARN_ON(irqs_disabled()); /* for portability */
- BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
free_pages((unsigned long)phys_to_virt(dma_handle), order);
iounmap(vaddr);
}
}
EXPORT_SYMBOL(dma_free_coherent);
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void __iomem *mem_base = NULL;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
- goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
- mem_base = ioremap_nocache(bus_addr, size);
- if (!mem_base)
- goto out;
-
- dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
- goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
-
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
-
- free1_out:
- kfree(dev->dma_mem);
- out:
- if (mem_base)
- iounmap(mem_base);
- return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if (!mem)
- return;
- dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- int pos, err;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
@@ -185,3 +84,59 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
}
EXPORT_SYMBOL(dma_cache_sync);
+
+static int __init memchunk_setup(char *str)
+{
+ return 1; /* accept anything that begins with "memchunk." */
+}
+__setup("memchunk.", memchunk_setup);
+
+static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
+{
+ char *p = boot_command_line;
+ int k = strlen(name);
+
+ while ((p = strstr(p, "memchunk."))) {
+ p += 9; /* strlen("memchunk.") */
+ if (!strncmp(name, p, k) && p[k] == '=') {
+ p += k + 1;
+ *sizep = memparse(p, NULL);
+ pr_info("%s: forcing memory chunk size to 0x%08lx\n",
+ name, *sizep);
+ break;
+ }
+ }
+}
+
+int __init platform_resource_setup_memory(struct platform_device *pdev,
+ char *name, unsigned long memsize)
+{
+ struct resource *r;
+ dma_addr_t dma_handle;
+ void *buf;
+
+ r = pdev->resource + pdev->num_resources - 1;
+ if (r->flags) {
+ pr_warning("%s: unable to find empty space for resource\n",
+ name);
+ return -EINVAL;
+ }
+
+ memchunk_cmdline_override(name, &memsize);
+ if (!memsize)
+ return 0;
+
+ buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+ if (!buf) {
+ pr_warning("%s: unable to allocate memory\n", name);
+ return -ENOMEM;
+ }
+
+ memset(buf, 0, memsize);
+
+ r->flags = IORESOURCE_MEM;
+ r->start = dma_handle;
+ r->end = r->start + memsize - 1;
+ r->name = name;
+ return 0;
+}
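
The new platform_resource_setup_memory() helper expects the device's last resource slot to be empty, fills it with the DMA buffer it allocates, and honours a "memchunk.<name>=<size>" kernel command-line override. A hypothetical board-level use (the "veu" name and the 1 MiB default are illustrative):

    #include <linux/kernel.h>
    #include <linux/platform_device.h>

    /* One empty slot, filled in by platform_resource_setup_memory(). */
    static struct resource veu_resources[1];

    static struct platform_device veu_device = {
            .name          = "veu",                     /* illustrative */
            .id            = 0,
            .resource      = veu_resources,
            .num_resources = ARRAY_SIZE(veu_resources),
    };

    static int __init board_memory_setup(void)
    {
            /* 1 MiB default; boot with "memchunk.veu=0x200000" to resize. */
            return platform_resource_setup_memory(&veu_device, "veu", 1 << 20);
    }
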
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index d1fa27594c6..898d477e47c 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
* Page fault handler for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* Based on linux/arch/i386/mm/fault.c:
* Copyright (C) 1995 Linus Torvalds
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/marker.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
@@ -37,16 +38,12 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
int fault;
siginfo_t info;
- trace_hardirqs_on();
- local_irq_enable();
-
-#ifdef CONFIG_SH_KGDB
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
+ /*
+ * We don't bother with any notifier callbacks here, as they are
+ * all handled through the __do_page_fault() fast-path.
+ */
tsk = current;
- mm = tsk->mm;
si_code = SEGV_MAPERR;
if (unlikely(address >= TASK_SIZE)) {
@@ -65,7 +62,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
pgd = get_TTB() + offset;
pgd_k = swapper_pg_dir + offset;
- /* This will never happen with the folded page table. */
if (!pgd_present(*pgd)) {
if (!pgd_present(*pgd_k))
goto bad_area_nosemaphore;
@@ -75,9 +71,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
- if (pud_present(*pud) || !pud_present(*pud_k))
- goto bad_area_nosemaphore;
- set_pud(pud, *pud_k);
+
+ if (!pud_present(*pud)) {
+ if (!pud_present(*pud_k))
+ goto bad_area_nosemaphore;
+ set_pud(pud, *pud_k);
+ return;
+ }
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
@@ -88,6 +88,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
return;
}
+ /* Only enable interrupts if they were on before the fault */
+ if ((regs->sr & SR_IMASK) != SR_IMASK) {
+ trace_hardirqs_on();
+ local_irq_enable();
+ }
+
+ mm = tsk->mm;
+
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -238,6 +246,25 @@ do_sigbus:
goto no_context;
}
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ int ret = 0;
+
+ trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+ trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ preempt_enable();
+ }
+#endif
+
+ return ret;
+}
+
#ifdef CONFIG_SH_STORE_QUEUES
/*
* This is a special case for the SH-4 store queues, as pages for this
@@ -261,12 +288,18 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pmd_t *pmd;
pte_t *pte;
pte_t entry;
+ int ret = 0;
+
+ if (notify_page_fault(regs, lookup_exception_vector()))
+ goto out;
#ifdef CONFIG_SH_KGDB
if (kgdb_nofault && kgdb_bus_err_hook)
kgdb_bus_err_hook();
#endif
+ ret = 1;
+
/*
* We don't take page faults for P1, P2, and parts of P4, these
* are always mapped, whether it be due to legacy behaviour in
@@ -276,24 +309,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pgd = pgd_offset_k(address);
} else {
if (unlikely(address >= TASK_SIZE || !current->mm))
- return 1;
+ goto out;
pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
if (pud_none_or_clear_bad(pud))
- return 1;
+ goto out;
pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
- return 1;
-
+ goto out;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
- return 1;
+ goto out;
if (unlikely(writeaccess && !pte_write(entry)))
- return 1;
+ goto out;
if (writeaccess)
entry = pte_mkdirty(entry);
@@ -310,5 +342,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
set_pte(pte, entry);
update_mmu_cache(NULL, address, entry);
- return 0;
+ ret = 0;
+out:
+ trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+ return ret;
}
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c
index 399d53710d2..bd63b961b2a 100644
--- a/arch/sh/mm/fault_64.c
+++ b/arch/sh/mm/fault_64.c
@@ -39,7 +39,7 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
-#include <asm/cpu/registers.h>
+#include <cpu/registers.h>
/* Callable from fault.c, so not static */
inline void __do_tlb_refill(unsigned long address,
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index ae8c321d6e2..9304117039c 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -22,7 +22,8 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
@@ -78,6 +79,11 @@ int pmd_huge(pmd_t pmd)
return 0;
}
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d7df26bd1e5..4abf00031da 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -23,48 +23,19 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long cached_to_uncached = 0;
-void show_mem(void)
-{
- int total = 0, reserved = 0, free = 0;
- int shared = 0, cached = 0, slab = 0;
- pg_data_t *pgdat;
-
- printk("Mem-info:\n");
- show_free_areas();
-
- for_each_online_pgdat(pgdat) {
- unsigned long flags, i;
-
- pgdat_resize_lock(pgdat, &flags);
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- struct page *page = pgdat_page_nr(pgdat, i);
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (PageSlab(page))
- slab++;
- else if (!page_count(page))
- free++;
- else
- shared += page_count(page) - 1;
- }
- pgdat_resize_unlock(pgdat, &flags);
- }
-
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- printk("%d pages of RAM\n", total);
- printk("%d free pages\n", free);
- printk("%d reserved pages\n", reserved);
- printk("%d slab pages\n", slab);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
- printk(KERN_INFO "Total of %ld pages in page table cache\n",
- quicklist_total_size());
-}
+#ifdef CONFIG_SUPERH32
+/*
+ * Handle trivial transitions between cached and uncached
+ * segments, making use of the 1:1 mapping relationship in
+ * 512MB lowmem.
+ *
+ * This is the offset of the uncached section from its cached alias.
+ * Default value only valid in 29 bit mode, in 32bit mode will be
+ * overridden in pmb_init.
+ */
+unsigned long cached_to_uncached = P2SEG - P1SEG;
+#endif
#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -99,9 +70,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
}
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-
- if (cached_to_uncached)
- flush_tlb_one(get_asid(), addr);
+ flush_tlb_one(get_asid(), addr);
}
/*
@@ -154,7 +123,6 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
if (!pmd_present(*pmd)) {
pte_t *pte_table;
pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- memset(pte_table, 0, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte_table);
}
@@ -191,7 +159,7 @@ void __init paging_init(void)
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long low, start_pfn;
- start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
+ start_pfn = pgdat->bdata->node_min_pfn;
low = pgdat->bdata->node_low_pfn;
if (max_zone_pfns[ZONE_NORMAL] < low)
@@ -206,15 +174,6 @@ void __init paging_init(void)
#ifdef CONFIG_SUPERH32
/* Set up the uncached fixmap */
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-
-#ifdef CONFIG_29BIT
- /*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- */
- cached_to_uncached = P2SEG - P1SEG;
-#endif
#endif
}
@@ -306,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, 0, NULL);
+ BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
@@ -333,4 +321,4 @@ int memory_add_physaddr_to_nid(u64 addr)
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
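
The THREAD_SHIFT < PAGE_SHIFT branch added to init.c above only kicks in once large pages are configured: with 64 kB pages and 8 kB kernel stacks, page-granular allocation would waste 56 kB per task, so the dedicated THREAD_SIZE-aligned slab packs several thread_info objects into each page. A back-of-the-envelope illustration, assuming those sizes (a standalone sketch, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size   = 1UL << 16;  /* assumed 64 kB PAGE_SIZE */
            unsigned long thread_size = 1UL << 13;  /* assumed 8 kB THREAD_SIZE */

            /* slab objects of THREAD_SIZE packed into one page */
            printf("thread_info objects per page: %lu\n",
                   page_size / thread_size);        /* prints 8 */
            return 0;
    }
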
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 1663199ce88..095d93bec7c 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -14,7 +14,6 @@
#include <linux/pfn.h>
#include <asm/sections.h>
-static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL_GPL(node_data);
@@ -35,7 +34,7 @@ void __init setup_memory(void)
NODE_DATA(0) = pfn_to_kaddr(free_pfn);
memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
free_pfn += PFN_UP(sizeof(struct pglist_data));
- NODE_DATA(0)->bdata = &plat_node_bdata[0];
+ NODE_DATA(0)->bdata = &bootmem_node_data[0];
/* Set up node 0 */
setup_bootmem_allocator(free_pfn);
@@ -66,7 +65,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
free_pfn += PFN_UP(sizeof(struct pglist_data));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
- NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
+ NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c
index 677dd57f087..91ed4e695ff 100644
--- a/arch/sh/mm/pg-nommu.c
+++ b/arch/sh/mm/pg-nommu.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <asm/uaccess.h>
void copy_page(void *to, void *from)
{
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 8c7a9ca7987..38870e0fc18 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(copy_user_highpage);
/*
* For SH-4, we have our own implementation for ptep_get_and_clear
*/
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index 7f885b7f8af..eaf25147194 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -118,7 +118,7 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
* For SH7705, we have our own implementation for ptep_get_and_clear
* Copied from pg-sh4.c
*/
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 0b0ec6e0475..84241676265 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -293,7 +293,7 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}
-static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
+static void pmb_cache_ctor(void *pmb)
{
struct pmb_entry *pmbe = pmb;
@@ -385,7 +385,7 @@ static const struct file_operations pmb_debugfs_fops = {
.open = pmb_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
static int __init pmb_debugfs_init(void)
@@ -394,6 +394,8 @@ static int __init pmb_debugfs_init(void)
dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
sh_debugfs_root, NULL, &pmb_debugfs_fops);
+ if (!dentry)
+ return -ENOMEM;
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index 15111bc7ddd..71c742b5aee 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
/*
* Nothing too terribly exciting here ..
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index f34274a1ded..dae131243bc 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -15,9 +15,7 @@
#include <asm/mmu_context.h>
/**
- * sh64_tlb_init
- *
- * Perform initial setup for the DTLB and ITLB.
+ * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
*/
int __init sh64_tlb_init(void)
{
@@ -46,9 +44,7 @@ int __init sh64_tlb_init(void)
}
/**
- * sh64_next_free_dtlb_entry
- *
- * Find the next available DTLB entry
+ * sh64_next_free_dtlb_entry - Find the next available DTLB entry
*/
unsigned long long sh64_next_free_dtlb_entry(void)
{
@@ -56,9 +52,7 @@ unsigned long long sh64_next_free_dtlb_entry(void)
}
/**
- * sh64_get_wired_dtlb_entry
- *
- * Allocate a wired (locked-in) entry in the DTLB
+ * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
*/
unsigned long long sh64_get_wired_dtlb_entry(void)
{
@@ -71,12 +65,10 @@ unsigned long long sh64_get_wired_dtlb_entry(void)
}
/**
- * sh64_put_wired_dtlb_entry
+ * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
*
* @entry: Address of TLB slot.
*
- * Free a wired (locked-in) entry in the DTLB.
- *
* Works like a stack, last one to allocate must be first one to free.
*/
int sh64_put_wired_dtlb_entry(unsigned long long entry)
@@ -115,7 +107,7 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry)
}
/**
- * sh64_setup_tlb_slot
+ * sh64_setup_tlb_slot - Load up a translation in a wired slot.
*
* @config_addr: Address of TLB slot.
* @eaddr: Virtual address.
@@ -154,7 +146,7 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr,
}
/**
- * sh64_teardown_tlb_slot
+ * sh64_teardown_tlb_slot - Teardown a translation.
*
* @config_addr: Address of TLB slot.
*