Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig          |  12
-rw-r--r--  arch/sh/mm/alignment.c      |   2
-rw-r--r--  arch/sh/mm/cache-debugfs.c  |   2
-rw-r--r--  arch/sh/mm/cache-sh2.c      |   4
-rw-r--r--  arch/sh/mm/cache-sh2a.c     |   6
-rw-r--r--  arch/sh/mm/cache-sh4.c      |   4
-rw-r--r--  arch/sh/mm/cache-shx3.c     |   4
-rw-r--r--  arch/sh/mm/cache.c          |   4
-rw-r--r--  arch/sh/mm/fault.c          |  37
-rw-r--r--  arch/sh/mm/init.c           |  82
-rw-r--r--  arch/sh/mm/mmap.c           | 139
-rw-r--r--  arch/sh/mm/tlb-sh5.c        |   2
12 files changed, 89 insertions(+), 209 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index cb8f9920f4d..dba285e8680 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -83,7 +83,7 @@ config 32BIT
 
 config PMB
         bool "Support 32-bit physical addressing through PMB"
-        depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+        depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
         select 32BIT
         select UNCACHED_MAPPING
         help
@@ -110,7 +110,8 @@ config VSYSCALL
 
 config NUMA
         bool "Non Uniform Memory Access (NUMA) Support"
-        depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL
+        depends on MMU && SYS_SUPPORTS_NUMA
+        select ARCH_WANT_NUMA_VARIABLE_LOCALITY
         default n
         help
           Some SH systems have many various memories scattered around
@@ -136,13 +137,6 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
         def_bool y
 
-config MAX_ACTIVE_REGIONS
-        int
-        default "6" if (CPU_SUBTYPE_SHX3 && SPARSEMEM)
-        default "2" if SPARSEMEM && (CPU_SUBTYPE_SH7722 || \
-                       CPU_SUBTYPE_SH7785)
-        default "1"
-
 config ARCH_SELECT_MEMORY_MODEL
         def_bool y
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index 620fa7ff9ee..ec2b2530242 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
 static ssize_t alignment_proc_write(struct file *file,
                 const char __user *buffer, size_t count, loff_t *pos)
 {
-        int *data = PDE(file->f_path.dentry->d_inode)->data;
+        int *data = PDE_DATA(file_inode(file));
         char mode;
 
         if (count > 0) {
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 11572519803..777e50f33c0 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
          */
         jump_to_uncached();
 
-        ccr = __raw_readl(CCR);
+        ccr = __raw_readl(SH_CCR);
         if ((ccr & CCR_CACHE_ENABLE) == 0) {
                 back_to_cached();
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf719f2e..a74259f2f98 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
         local_irq_save(flags);
         jump_to_uncached();
 
-        ccr = __raw_readl(CCR);
+        ccr = __raw_readl(SH_CCR);
         ccr |= CCR_CACHE_INVALIDATE;
-        __raw_writel(ccr, CCR);
+        __raw_writel(ccr, SH_CCR);
 
         back_to_cached();
         local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3138a..ee87d081259 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 
         /* If there are too many pages then just blow the cache */
         if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-                __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+                __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+                             SH_CCR);
         } else {
                 for (v = begin; v < end; v += L1_CACHE_BYTES)
                         sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
         /* I-Cache invalidate */
         /* If there are too many pages then just blow the cache */
         if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-                __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+                __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+                             SH_CCR);
         } else {
                 for (v = start; v < end; v += L1_CACHE_BYTES)
                         sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e529285b28..51d8f7f31d1 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
         jump_to_uncached();
 
         /* Flush I-cache */
-        ccr = __raw_readl(CCR);
+        ccr = __raw_readl(SH_CCR);
         ccr |= CCR_CACHE_ICI;
-        __raw_writel(ccr, CCR);
+        __raw_writel(ccr, SH_CCR);
 
         /*
          * back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee97b5..24c58b7dc02 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
 {
         unsigned int ccr;
 
-        ccr = __raw_readl(CCR);
+        ccr = __raw_readl(SH_CCR);
 
         /*
          * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
                 ccr |= CCR_CACHE_IBE;
 #endif
 
-        writel_uncached(ccr, CCR);
+        writel_uncached(ccr, SH_CCR);
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a96cb..097c2cdd117 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
 {
         unsigned int cache_disabled = 0;
 
-#ifdef CCR
-        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+        cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif
 
         compute_alias(&boot_cpu_data.icache);
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 1fc25d85e51..541dc610150 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -58,11 +58,15 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
 
-        if (mm)
+        if (mm) {
                 pgd = mm->pgd;
-        else
+        } else {
                 pgd = get_TTB();
 
+                if (unlikely(!pgd))
+                        pgd = swapper_pg_dir;
+        }
+
         printk(KERN_ALERT "pgd = %p\n", pgd);
         pgd += pgd_index(addr);
         printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
@@ -297,17 +301,6 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
         __bad_area(regs, error_code, address, SEGV_ACCERR);
 }
 
-static void out_of_memory(void)
-{
-        /*
-         * We ran out of memory, call the OOM killer, and return the userspace
-         * (which will retry the fault, or kill us if we got oom-killed):
-         */
-        up_read(&current->mm->mmap_sem);
-
-        pagefault_out_of_memory();
-}
-
 static void
 do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
@@ -349,8 +342,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                         no_context(regs, error_code, address);
                         return 1;
                 }
+                up_read(&current->mm->mmap_sem);
 
-                out_of_memory();
+                /*
+                 * We ran out of memory, call the OOM killer, and return the
+                 * userspace (which will retry the fault, or kill us if we got
+                 * oom-killed):
+                 */
+                pagefault_out_of_memory();
         } else {
                 if (fault & VM_FAULT_SIGBUS)
                         do_sigbus(regs, error_code, address);
@@ -401,9 +400,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
         struct mm_struct *mm;
         struct vm_area_struct * vma;
         int fault;
-        int write = error_code & FAULT_CODE_WRITE;
-        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                              (write ? FAULT_FLAG_WRITE : 0));
+        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
         tsk = current;
         mm = tsk->mm;
@@ -477,6 +474,11 @@ good_area:
 
         set_thread_fault_code(error_code);
 
+        if (user_mode(regs))
+                flags |= FAULT_FLAG_USER;
+        if (error_code & FAULT_CODE_WRITE)
+                flags |= FAULT_FLAG_WRITE;
+
         /*
          * If for any reason at all we couldn't handle the fault,
          * make sure we exit gracefully rather than endlessly redo
@@ -500,6 +502,7 @@ good_area:
         }
         if (fault & VM_FAULT_RETRY) {
                 flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                flags |= FAULT_FLAG_TRIED;
 
                 /*
                  * No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 82cc576fab1..2d089fe2cba 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
         if (!p->node_spanned_pages)
                 return;
 
-        end_pfn = p->node_start_pfn + p->node_spanned_pages;
+        end_pfn = pgdat_end_pfn(p);
 
         total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
@@ -407,32 +407,16 @@ unsigned int mem_init_done = 0;
 
 void __init mem_init(void)
 {
-        int codesize, datasize, initsize;
-        int nid;
+        pg_data_t *pgdat;
 
         iommu_init();
 
-        num_physpages = 0;
         high_memory = NULL;
+        for_each_online_pgdat(pgdat)
+                high_memory = max_t(void *, high_memory,
+                                    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
 
-        for_each_online_node(nid) {
-                pg_data_t *pgdat = NODE_DATA(nid);
-                unsigned long node_pages = 0;
-                void *node_high_memory;
-
-                num_physpages += pgdat->node_present_pages;
-
-                if (pgdat->node_spanned_pages)
-                        node_pages = free_all_bootmem_node(pgdat);
-
-                totalram_pages += node_pages;
-
-                node_high_memory = (void *)__va((pgdat->node_start_pfn +
-                                                 pgdat->node_spanned_pages) <<
-                                                 PAGE_SHIFT);
-                if (node_high_memory > high_memory)
-                        high_memory = node_high_memory;
-        }
+        free_all_bootmem();
 
         /* Set this up early, so we can take care of the zero page */
         cpu_cache_init();
@@ -443,19 +427,8 @@ void __init mem_init(void)
 
         vsyscall_init();
 
-        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-               "%dk data, %dk init)\n",
-                nr_free_pages() << (PAGE_SHIFT-10),
-                num_physpages << (PAGE_SHIFT-10),
-                codesize >> 10,
-                datasize >> 10,
-                initsize >> 10);
-
-        printk(KERN_INFO "virtual kernel memory layout:\n"
+        mem_init_print_info(NULL);
+        pr_info("virtual kernel memory layout:\n"
                 "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
                 "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
@@ -501,31 +474,13 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-        unsigned long addr;
-
-        addr = (unsigned long)(&__init_begin);
-        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(addr));
-                init_page_count(virt_to_page(addr));
-                free_page(addr);
-                totalram_pages++;
-        }
-        printk("Freeing unused kernel memory: %ldk freed\n",
-               ((unsigned long)&__init_end -
-                (unsigned long)&__init_begin) >> 10);
+        free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        unsigned long p;
-        for (p = start; p < end; p += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(p));
-                init_page_count(virt_to_page(p));
-                free_page(p);
-                totalram_pages++;
-        }
-        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+        free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
 
@@ -558,4 +513,21 @@ int memory_add_physaddr_to_nid(u64 addr)
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+        unsigned long start_pfn = start >> PAGE_SHIFT;
+        unsigned long nr_pages = size >> PAGE_SHIFT;
+        struct zone *zone;
+        int ret;
+
+        zone = page_zone(pfn_to_page(start_pfn));
+        ret = __remove_pages(zone, start_pfn, nr_pages);
+        if (unlikely(ret))
+                pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
+                        ret);
+
+        return ret;
+}
+#endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index afeb710ec5c..6777177807c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -30,25 +30,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
         return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-                                              unsigned long pgoff)
-{
-        unsigned long base = addr & ~shm_align_mask;
-        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-        if (base + off <= addr)
-                return base + off;
-
-        return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
         unsigned long len, unsigned long pgoff, unsigned long flags)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
-        unsigned long start_addr;
         int do_colour_align;
+        struct vm_unmapped_area_info info;
 
         if (flags & MAP_FIXED) {
                 /* We do not accept a shared mapping if it would violate
@@ -79,47 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                 return addr;
         }
 
-        if (len > mm->cached_hole_size) {
-                start_addr = addr = mm->free_area_cache;
-        } else {
-                mm->cached_hole_size = 0;
-                start_addr = addr = TASK_UNMAPPED_BASE;
-        }
-
-full_search:
-        if (do_colour_align)
-                addr = COLOUR_ALIGN(addr, pgoff);
-        else
-                addr = PAGE_ALIGN(mm->free_area_cache);
-
-        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-                /* At this point:  (!vma || addr < vma->vm_end). */
-                if (unlikely(TASK_SIZE - len < addr)) {
-                        /*
-                         * Start a new search - just in case we missed
-                         * some holes.
-                         */
-                        if (start_addr != TASK_UNMAPPED_BASE) {
-                                start_addr = addr = TASK_UNMAPPED_BASE;
-                                mm->cached_hole_size = 0;
-                                goto full_search;
-                        }
-                        return -ENOMEM;
-                }
-                if (likely(!vma || addr + len <= vma->vm_start)) {
-                        /*
-                         * Remember the place where we stopped the search:
-                         */
-                        mm->free_area_cache = addr + len;
-                        return addr;
-                }
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                addr = vma->vm_end;
-                if (do_colour_align)
-                        addr = COLOUR_ALIGN(addr, pgoff);
-        }
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = TASK_UNMAPPED_BASE;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -131,6 +85,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
         struct mm_struct *mm = current->mm;
         unsigned long addr = addr0;
         int do_colour_align;
+        struct vm_unmapped_area_info info;
 
         if (flags & MAP_FIXED) {
                 /* We do not accept a shared mapping if it would violate
@@ -162,73 +117,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                 return addr;
         }
 
-        /* check if free_area_cache is useful for us */
-        if (len <= mm->cached_hole_size) {
-                mm->cached_hole_size = 0;
-                mm->free_area_cache  = mm->mmap_base;
-        }
-
-        /* either no address requested or can't fit in requested address hole */
-        addr = mm->free_area_cache;
-        if (do_colour_align) {
-                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.low_limit = PAGE_SIZE;
+        info.high_limit = mm->mmap_base;
+        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        addr = vm_unmapped_area(&info);
 
-                addr = base + len;
-        }
-
-        /* make sure it can fit in the remaining address space */
-        if (likely(addr > len)) {
-                vma = find_vma(mm, addr-len);
-                if (!vma || addr <= vma->vm_start) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr-len);
-                }
-        }
-
-        if (unlikely(mm->mmap_base < len))
-                goto bottomup;
-
-        addr = mm->mmap_base-len;
-        if (do_colour_align)
-                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-        do {
-                /*
-                 * Lookup failure means no vma is above this address,
-                 * else if new region fits below vma->vm_start,
-                 * return with success:
-                 */
-                vma = find_vma(mm, addr);
-                if (likely(!vma || addr+len <= vma->vm_start)) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr);
-                }
-
-                /* remember the largest hole we saw so far */
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                /* try just below the current vma->vm_start */
-                addr = vma->vm_start-len;
-                if (do_colour_align)
-                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-        } while (likely(len < vma->vm_start));
-
-bottomup:
         /*
          * A failed mmap() very likely causes application failure,
          * so fall back to the bottom-up function here. This scenario
          * can happen with large stack limits and large mmap()
          * allocations.
          */
-        mm->cached_hole_size = ~0UL;
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
-        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-        /*
-         * Restore the topdown base:
-         */
-        mm->free_area_cache = mm->mmap_base;
-        mm->cached_hole_size = ~0UL;
+        if (addr & ~PAGE_MASK) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.flags = 0;
+                info.low_limit = TASK_UNMAPPED_BASE;
+                info.high_limit = TASK_SIZE;
+                addr = vm_unmapped_area(&info);
+        }
 
         return addr;
 }
@@ -238,7 +147,7 @@ bottomup:
  * You really shouldn't be using read() or write() on /dev/mem.  This
  * might go away in the future.
  */
-int valid_phys_addr_range(unsigned long addr, size_t count)
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
         if (addr < __MEMORY_START)
                 return 0;
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index 3aea25dc431..e4bb2a8e0a6 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -17,7 +17,7 @@
 /**
  * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
  */
-int __init sh64_tlb_init(void)
+int sh64_tlb_init(void)
 {
         /* Assign some sane DTLB defaults */
         cpu_data->dtlb.entries  = 64;
