Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig          |  48
-rw-r--r--  arch/sh/mm/Makefile         |  12
-rw-r--r--  arch/sh/mm/alignment.c      | 189
-rw-r--r--  arch/sh/mm/cache-debugfs.c  |   7
-rw-r--r--  arch/sh/mm/cache-sh2.c      |  12
-rw-r--r--  arch/sh/mm/cache-sh2a.c     |  20
-rw-r--r--  arch/sh/mm/cache-sh3.c      |   6
-rw-r--r--  arch/sh/mm/cache-sh4.c      |  27
-rw-r--r--  arch/sh/mm/cache-sh7705.c   |  12
-rw-r--r--  arch/sh/mm/cache.c          |  13
-rw-r--r--  arch/sh/mm/fault_32.c       |   5
-rw-r--r--  arch/sh/mm/init.c           | 166
-rw-r--r--  arch/sh/mm/ioremap.c        | 136
-rw-r--r--  arch/sh/mm/ioremap_32.c     | 145
-rw-r--r--  arch/sh/mm/ioremap_64.c     | 326
-rw-r--r--  arch/sh/mm/ioremap_fixed.c  | 135
-rw-r--r--  arch/sh/mm/nommu.c          |   4
-rw-r--r--  arch/sh/mm/numa.c           |   3
-rw-r--r--  arch/sh/mm/pgtable.c        |  56
-rw-r--r--  arch/sh/mm/pmb.c            | 840
-rw-r--r--  arch/sh/mm/tlb-pteaex.c     |   3
-rw-r--r--  arch/sh/mm/tlb-sh3.c        |   6
-rw-r--r--  arch/sh/mm/tlb-sh4.c        |  13
-rw-r--r--  arch/sh/mm/tlb-sh5.c        |  39
-rw-r--r--  arch/sh/mm/tlb-urb.c        |  81
-rw-r--r--  arch/sh/mm/tlbflush_32.c    |   4
-rw-r--r--  arch/sh/mm/tlbflush_64.c    |   2
-rw-r--r--  arch/sh/mm/uncached.c       |  34
28 files changed, 1502 insertions(+), 842 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 986a71b88ca..1445ca6257d 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -75,52 +75,25 @@ config MEMORY_SIZE
 config 29BIT
 	def_bool !32BIT
 	depends on SUPERH32
+	select UNCACHED_MAPPING
 
 config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB_ENABLE
-	bool "Support 32-bit physical addressing through PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
-	help
-	  If you say Y here, physical addressing will be extended to
-	  32-bits through the SH-4A PMB. If this is not set, legacy
-	  29-bit physical addressing will be used.
-
-choice
-	prompt "PMB handling type"
-	depends on PMB_ENABLE
-	default PMB_FIXED
-
 config PMB
-	bool "PMB"
+	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+	select 32BIT
+	select UNCACHED_MAPPING
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
-config PMB_FIXED
-	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
-	select 32BIT
-	help
-	  If this option is enabled, fixed PMB mappings are inherited
-	  from the boot loader, and the kernel does not attempt dynamic
-	  management. This is the closest to legacy 29-bit physical mode,
-	  and allows systems to support up to 512MiB of system memory.
-
-endchoice
-
 config X2TLB
-	bool "Enable extended TLB mode"
-	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
-	help
-	  Selecting this option will enable the extended mode of the SH-X2
-	  TLB. For legacy SH-X behaviour and interoperability, say N. For
-	  all of the fun new features and a willingless to submit bug reports,
-	  say Y.
+	def_bool y
+	depends on (CPU_SHX2 || CPU_SHX3) && MMU
 
 config VSYSCALL
 	bool "Support vsyscall page"
@@ -188,14 +161,19 @@ config ARCH_MEMORY_PROBE
 	def_bool y
 	depends on MEMORY_HOTPLUG
 
+config IOREMAP_FIXED
+	def_bool y
+	depends on X2TLB || SUPERH64
+
+config UNCACHED_MAPPING
+	bool
+
 choice
 	prompt "Kernel page size"
-	default PAGE_SIZE_8KB if X2TLB
 	default PAGE_SIZE_4KB
 
 config PAGE_SIZE_4KB
 	bool "4kB"
-	depends on !MMU || !X2TLB
 	help
 	  This is the default page size used by all SuperH CPUs.
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 8a70535fa7c..3dc8a8a6382 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y			:= cache.o init.o consistent.o mmap.o
+obj-y			:= alignment.o cache.o init.o consistent.o mmap.o
 
 cacheops-$(CONFIG_CPU_SH2)	:= cache-sh2.o
 cacheops-$(CONFIG_CPU_SH2A)	:= cache-sh2a.o
@@ -15,7 +15,7 @@ obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
 mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o \
-			   ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o
+			   ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
@@ -26,15 +26,17 @@ endif
 
 ifdef CONFIG_MMU
 tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
-tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
+tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o tlb-urb.o
 tlb-$(CONFIG_CPU_SH5)		:= tlb-sh5.o
-tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o tlb-urb.o
 obj-y				+= $(tlb-y)
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_PMB_ENABLE)	+= pmb.o
+obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
+obj-$(CONFIG_UNCACHED_MAPPING)	+= uncached.o
# Special flags for fault_64.o.  This puts restrictions on the number of
# caller-save registers that the compiler can target when building this
# file.
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 00000000000..b2595b8548e
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,189 @@
+/*
+ * Alignment access counters and corresponding user-space interfaces.
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <asm/alignment.h>
+#include <asm/processor.h>
+
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+   valid! */
+static int se_usermode = UM_WARN | UM_FIXUP;
+/* 0: no warning 1: print a warning message, disabled by default */
+static int se_kernmode_warn;
+
+core_param(alignment, se_usermode, int, 0600);
+
+void inc_unaligned_byte_access(void)
+{
+	se_half++;
+}
+
+void inc_unaligned_word_access(void)
+{
+	se_word++;
+}
+
+void inc_unaligned_dword_access(void)
+{
+	se_dword++;
+}
+
+void inc_unaligned_multi_access(void)
+{
+	se_multi++;
+}
+
+void inc_unaligned_user_access(void)
+{
+	se_user++;
+}
+
+void inc_unaligned_kernel_access(void)
+{
+	se_sys++;
+}
+/*
+ * This defaults to the global policy which can be set from the command
+ * line, while processes can overload their preferences via prctl().
+ */
+unsigned int unaligned_user_action(void)
+{
+	unsigned int action = se_usermode;
+
+	if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
+		action &= ~UM_FIXUP;
+		action |= UM_SIGNAL;
+	}
+
+	if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
+		action &= ~UM_WARN;
+
+	return action;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
+{
+	return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
+			(unsigned int __user *)addr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+	tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
+			    (val & SH_THREAD_UAC_MASK);
+	return 0;
+}
+
+void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
+			     struct pt_regs *regs)
+{
+	if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit())
+		pr_notice("Fixing up unaligned userspace access "
+			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			  tsk->comm, task_pid_nr(tsk),
+			  (void *)instruction_pointer(regs), insn);
+	else if (se_kernmode_warn && printk_ratelimit())
+		pr_notice("Fixing up unaligned kernel access "
+			  "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			  tsk->comm, task_pid_nr(tsk),
+			  (void *)instruction_pointer(regs), insn);
+}
+
+static const char *se_usermode_action[] = {
+	"ignored",
+	"warn",
+	"fixup",
+	"fixup+warn",
+	"signal",
+	"signal+warn"
+};
+
+static int alignment_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "User:\t\t%lu\n", se_user);
+	seq_printf(m, "System:\t\t%lu\n", se_sys);
+	seq_printf(m, "Half:\t\t%lu\n", se_half);
+	seq_printf(m, "Word:\t\t%lu\n", se_word);
+	seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+	seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+	seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
+		   se_usermode_action[se_usermode]);
+	seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+		   se_kernmode_warn ? "+warn" : "");
+	return 0;
+}
+
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, alignment_proc_show, NULL);
+}
+
+static ssize_t alignment_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	int *data = PDE(file->f_path.dentry->d_inode)->data;
+	char mode;
+
+	if (count > 0) {
+		if (get_user(mode, buffer))
+			return -EFAULT;
+		if (mode >= '0' && mode <= '5')
+			*data = mode - '0';
+	}
+	return count;
+}
+
+static const struct file_operations alignment_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= alignment_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= alignment_proc_write,
+};
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten.  Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+	struct proc_dir_entry *dir, *res;
+
+	dir = proc_mkdir("cpu", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_usermode);
+	if (!res)
+		return -ENOMEM;
+
+	res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_kernmode_warn);
+	if (!res)
+		return -ENOMEM;
+
+	return 0;
+}
+fs_initcall(alignment_init);
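The new alignment.c above exposes two user-visible knobs: the global policy in /proc/cpu/alignment (writing a digit '0'..'5' selects the corresponding se_usermode_action entry) and the per-task override that get_unalign_ctl()/set_unalign_ctl() back via prctl(). A minimal userspace sketch of both, not part of this diff; PR_GET_UNALIGN/PR_SET_UNALIGN are the stock prctl(2) codes, which return EINVAL on architectures that do not wire them up:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int mode;
	char buf[256];
	FILE *f;

	/* Read back the current per-task policy (SH_THREAD_UAC_* bits). */
	if (prctl(PR_GET_UNALIGN, &mode) == 0)
		printf("per-task unalign control: %u\n", mode);

	/* Ask for SIGBUS instead of silent fixup, for this task only. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
		perror("PR_SET_UNALIGN");

	/* The global counters and policy remain visible in procfs. */
	f = fopen("/proc/cpu/alignment", "r");
	if (f) {
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
	}
	return 0;
}

The global equivalent of the prctl above would be "echo 4 > /proc/cpu/alignment" (pure signal) or boot-time "alignment=4" via the core_param() hook.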
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 5ba067b2659..690ed010d00 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,8 +22,7 @@ enum cache_type {
 	CACHE_TYPE_UNIFIED,
 };
 
-static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
-						  void *iter)
+static int cache_seq_show(struct seq_file *file, void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
@@ -37,7 +36,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
 	 */
 	jump_to_uncached();
 
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();
@@ -90,7 +89,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
 
 		for (addr = addrstart, line = 0;
 		     addr < addrstart + waysize;
 		     addr += cache->linesz, line++) {
-			unsigned long data = ctrl_inl(addr);
+			unsigned long data = __raw_readl(addr);
 
 			/* Check the V bit, ignore invalid cachelines */
 			if ((data & 1) == 0)
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 699a71f4632..defcf719f2e 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -28,10 +28,10 @@ static void sh2__flush_wback_region(void *start, int size)
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
 		int way;
 		for (way = 0; way < 4; way++) {
-			unsigned long data = ctrl_inl(addr | (way << 12));
+			unsigned long data = __raw_readl(addr | (way << 12));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr | (way << 12));
+				__raw_writel(data, addr | (way << 12));
 			}
 		}
 	}
@@ -47,7 +47,7 @@ static void sh2__flush_purge_region(void *start, int size)
 		& ~(L1_CACHE_BYTES-1);
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES)
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
 }
 
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
 	local_irq_save(flags);
 	jump_to_uncached();
 
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	ctrl_outl(ccr, CCR);
+	__raw_writel(ccr, CCR);
 
 	back_to_cached();
 	local_irq_restore(flags);
@@ -78,7 +78,7 @@ static void sh2__flush_invalidate_region(void *start, int size)
 		& ~(L1_CACHE_BYTES-1);
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES)
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
 #endif
 }
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 975899d8356..1f51225426a 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -32,10 +32,10 @@ static void sh2a__flush_wback_region(void *start, int size)
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0);
 		int way;
 		for (way = 0; way < 4; way++) {
-			unsigned long data = ctrl_inl(addr | (way << 11));
+			unsigned long data = __raw_readl(addr | (way << 11));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr | (way << 11));
+				__raw_writel(data, addr | (way << 11));
 			}
 		}
 	}
@@ -58,7 +58,7 @@ static void sh2a__flush_purge_region(void *start, int size)
 	jump_to_uncached();
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 	back_to_cached();
@@ -78,17 +78,17 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 	jump_to_uncached();
 
 #ifdef CONFIG_CACHE_WRITEBACK
-	ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+	__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
 
 	/* I-cache invalidate */
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 #else
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
-		ctrl_outl((v & CACHE_PHYSADDR_MASK),
+		__raw_writel((v & CACHE_PHYSADDR_MASK),
 			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
 	}
 #endif
@@ -115,14 +115,14 @@ static void sh2a_flush_icache_range(void *args)
 		int way;
 		/* O-Cache writeback */
 		for (way = 0; way < 4; way++) {
-			unsigned long data = ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+			unsigned long data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
 			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
+				__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
 			}
 		}
 		/* I-Cache invalidate */
-		ctrl_outl(addr,
+		__raw_writel(addr,
 			  CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
 	}
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index faef80c9813..e37523f6519 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -50,12 +50,12 @@ static void sh3__flush_wback_region(void *start, int size)
 			p = __pa(v);
 			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
 			local_irq_save(flags);
-			data = ctrl_inl(addr);
+			data = __raw_readl(addr);
 
 			if ((data & CACHE_PHYSADDR_MASK) ==
 			    (p & CACHE_PHYSADDR_MASK)) {
 				data &= ~SH_CACHE_UPDATED;
-				ctrl_outl(data, addr);
+				__raw_writel(data, addr);
 				local_irq_restore(flags);
 				break;
 			}
@@ -86,7 +86,7 @@ static void sh3__flush_purge_region(void *start, int size)
 		data = (v & 0xfffffc00);	/* _Virtual_ address, ~U, ~V */
 		addr = CACHE_OC_ADDRESS_ARRAY |
 			(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
-		ctrl_outl(data, addr);
+		__raw_writel(data, addr);
 	}
 }
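The ctrl_inl()/ctrl_outl() to __raw_readl()/__raw_writel() churn across these files is mechanical: the SH-private ctrl_* helpers were plain volatile 32-bit loads and stores on control-register addresses, which is exactly the contract of the generic __raw_* MMIO accessors (no barriers, no byte swapping). A kernel-style sketch of the equivalence, assuming the usual __raw_* semantics; this shim is illustrative, not part of the diff:

/* The old SH-only helpers, expressed in terms of the generic accessors. */
static inline unsigned long ctrl_inl(unsigned long addr)
{
	return __raw_readl((void __iomem *)addr);
}

static inline void ctrl_outl(unsigned long data, unsigned long addr)
{
	__raw_writel(data, (void __iomem *)addr);
}

Because neither form inserts barriers or swaps bytes, the substitution cannot change behaviour; it only drops a private API in favour of the portable one.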
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 560ddb6bc8a..2cfae81914a 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -36,7 +36,7 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
+static void sh4_flush_icache_range(void *args)
 {
 	struct flusher_data *data = args;
 	unsigned long start, end;
@@ -109,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
 static void sh4_flush_dcache_page(void *arg)
 {
 	struct page *page = arg;
+	unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
 	struct address_space *mapping = page_mapping(page);
 
@@ -116,22 +117,14 @@ static void sh4_flush_dcache_page(void *arg)
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
-	{
-		unsigned long phys = page_to_phys(page);
-		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
-		int i, n;
-
-		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
-			flush_cache_one(addr, phys);
-	}
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+				(addr & shm_align_mask), page_to_phys(page));
 
 	wmb();
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static void __uses_jump_to_uncached flush_icache_all(void)
+static void flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
@@ -139,9 +132,9 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	jump_to_uncached();
 
 	/* Flush I-cache */
-	ccr = ctrl_inl(CCR);
+	ccr = __raw_readl(CCR);
 	ccr |= CCR_CACHE_ICI;
-	ctrl_outl(ccr, CCR);
+	__raw_writel(ccr, CCR);
 
 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
@@ -384,9 +377,9 @@ extern void __weak sh4__flush_region_init(void);
 void __init sh4_cache_init(void)
 {
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
+		__raw_readl(CCN_PVR),
+		__raw_readl(CCN_CVR),
+		__raw_readl(CCN_PRR));
 
 	local_flush_icache_range	= sh4_flush_icache_range;
 	local_flush_dcache_page		= sh4_flush_dcache_page;
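The sh4_flush_dcache_page() change stops walking every D-cache alias and instead writes back only the alias set the kernel mapping actually occupies, selected by (addr & shm_align_mask). Illustrative arithmetic only, not part of the diff: the OC address array base is the SH-4 value, but the way size (and hence the mask and alias count) is an assumption here for the sake of a concrete number.

#include <stdio.h>

#define CACHE_OC_ADDRESS_ARRAY	0xf4000000UL	/* SH-4 operand cache array */
#define PAGE_SIZE		4096UL
#define WAY_SIZE		(16 * 1024UL)	/* assumed: 16 kB per way */
#define SHM_ALIGN_MASK		(WAY_SIZE - 1)	/* 4 alias colours with 4 kB pages */

int main(void)
{
	unsigned long kaddr = 0x8c43a000UL;	/* example P1 page address */

	/* Old code looped over all n_aliases entries; the new code touches
	 * just the one slot this virtual address indexes into: */
	unsigned long entry = CACHE_OC_ADDRESS_ARRAY | (kaddr & SHM_ALIGN_MASK);

	printf("alias colour %lu -> address-array entry %#lx\n",
	       (kaddr & SHM_ALIGN_MASK) / PAGE_SIZE, entry);
	return 0;
}

With the assumed 16 kB way, 0x8c43a000 lands in colour 2, so only 0xf4002000 needs the writeback rather than all four alias slots.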
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f527fb70fce..f498da1cce7 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -48,10 +48,10 @@ static inline void cache_wback_all(void)
 			unsigned long data;
 			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
 
-			data = ctrl_inl(addr);
+			data = __raw_readl(addr);
 
 			if ((data & v) == v)
-				ctrl_outl(data & ~v, addr);
+				__raw_writel(data & ~v, addr);
 
 		}
 
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
+static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -115,10 +115,10 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 
-			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
+			data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
 			if (data == phys) {
 				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
-				ctrl_outl(data, addr);
+				__raw_writel(data, addr);
 			}
 		}
 
@@ -144,7 +144,7 @@ static void sh7705_flush_dcache_page(void *arg)
 		__flush_dcache_page(__pa(page_address(page)));
 }
 
-static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
+static void sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index b8607fa7ae1..0f4095d7ac8 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
+ * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
@@ -41,8 +41,17 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                        int wait)
 {
 	preempt_disable();
-	smp_call_function(func, info, wait);
+
+	/*
+	 * It's possible that this gets called early on when IRQs are
+	 * still disabled due to ioremapping by the boot CPU, so don't
+	 * even attempt IPIs unless there are other CPUs online.
+	 */
+	if (num_online_cpus() > 1)
+		smp_call_function(func, info, wait);
+
 	func(info);
+
 	preempt_enable();
 }
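The cacheop_on_each_cpu() body above is the standard "broadcast then run locally" idiom that the stock on_each_cpu() helper packages up; a sketch of the relationship, not part of the diff:

/* The usual form: preempt off, IPI the other CPUs, run func locally. */
static inline void cache_broadcast(void (*func)(void *), void *info)
{
	on_each_cpu(func, info, 1);	/* 1 = wait for all CPUs to finish */
}

The diff cannot simply call on_each_cpu() here because these cache operations can run very early, while the boot CPU still has IRQs disabled during ioremap setup; an IPI sent then would never be acknowledged. Gating smp_call_function() on num_online_cpus() > 1 while always running func(info) locally keeps the UP/early-boot path IPI-free.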
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0a..8bf79e3b7bd 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	if (!pud_present(*pud_k))
 		return NULL;
 
+	if (!pud_present(*pud))
+		set_pud(pud, *pud_k);
+
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
@@ -371,7 +374,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
-	update_mmu_cache(NULL, address, pte);
+	update_mmu_cache(NULL, address, pte);
 
 	return 0;
 }
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76..68028e8f26c 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -21,25 +21,13 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_SUPERH32
-/*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- *
- * This is the offset of the uncached section from its cached alias.
- * Default value only valid in 29 bit mode, in 32bit mode will be
- * overridden in pmb_init.
- */
-unsigned long cached_to_uncached = P2SEG - P1SEG;
-#endif
-
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +37,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -72,23 +68,24 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- *					 -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -101,6 +98,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
@@ -120,7 +129,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -182,9 +197,6 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
-
-	/* Set up the uncached fixmap */
-	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 }
 
 /*
@@ -195,6 +207,8 @@ static void __init iommu_init(void)
 	no_iommu_init();
 }
 
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
 	int codesize, datasize, initsize;
@@ -231,6 +245,8 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
+	vsyscall_init();
+
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -243,8 +259,48 @@ void __init mem_init(void)
 	       datasize >> 10,
 	       initsize >> 10);
 
-	/* Initialize the vDSO */
-	vsyscall_init();
+	printk(KERN_INFO "virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+		uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)
@@ -277,35 +333,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
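The set_pte_phys()/clear_pte_phys() pair above is what gives _PAGE_WIRED its meaning: a fixmap established with that bit set gets its translation pinned into a UTLB slot via tlb_wire_entry() (from the new tlb-urb.c), so it never takes a TLB miss, and __clear_fixmap() unwires it before zapping the PTE. A sketch of how the fixed-ioremap side of this series is meant to drive it; __set_fixmap, __clear_fixmap and _PAGE_WIRED come from this diff, while the helper names and the PAGE_KERNEL_NOCACHE pgprot choice are assumptions for illustration:

/* Map a device page at a fixmap slot with a wired (miss-free) translation.
 * Useful while ioremap() itself is still being brought up. */
static void __iomem *map_wired(enum fixed_addresses idx, unsigned long phys)
{
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) |
				 _PAGE_WIRED);

	/* set_pte_phys() sees _PAGE_WIRED and calls tlb_wire_entry(). */
	__set_fixmap(idx, phys, prot);

	return (void __iomem *)__fix_to_virt(idx);
}

static void unmap_wired(enum fixed_addresses idx)
{
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) |
				 _PAGE_WIRED);

	/* clear_pte_phys() unwires the UTLB slot before clearing the PTE,
	 * so the wired-entry budget is handed back. */
	__clear_fixmap(idx, prot);
}

Each wired mapping permanently consumes a UTLB entry, which is why the old comment removed in this diff argued against wiring fixmaps wholesale; the flag makes it an explicit, per-mapping opt-in instead.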