Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig          |   8
-rw-r--r--  arch/arm/mm/cache-fa.S       |  12
-rw-r--r--  arch/arm/mm/cache-l2x0.c     |  78
-rw-r--r--  arch/arm/mm/cache-v3.S       |  10
-rw-r--r--  arch/arm/mm/cache-v4.S       |  10
-rw-r--r--  arch/arm/mm/cache-v4wb.S     |  12
-rw-r--r--  arch/arm/mm/cache-v4wt.S     |  12
-rw-r--r--  arch/arm/mm/dma-mapping.c    |   2
-rw-r--r--  arch/arm/mm/fault-armv.c     |  32
-rw-r--r--  arch/arm/mm/highmem.c        |  24
-rw-r--r--  arch/arm/mm/init.c           | 155
-rw-r--r--  arch/arm/mm/mmu.c            |  73
-rw-r--r--  arch/arm/mm/pgd.c            |   4
-rw-r--r--  arch/arm/mm/proc-arm1020.S   |  15
-rw-r--r--  arch/arm/mm/proc-arm1020e.S  |  15
-rw-r--r--  arch/arm/mm/proc-arm1022.S   |  15
-rw-r--r--  arch/arm/mm/proc-arm1026.S   |  15
-rw-r--r--  arch/arm/mm/proc-arm920.S    |  12
-rw-r--r--  arch/arm/mm/proc-arm922.S    |  12
-rw-r--r--  arch/arm/mm/proc-arm925.S    |  12
-rw-r--r--  arch/arm/mm/proc-arm926.S    |  12
-rw-r--r--  arch/arm/mm/proc-arm940.S    |  12
-rw-r--r--  arch/arm/mm/proc-arm946.S    |  12
-rw-r--r--  arch/arm/mm/proc-feroceon.S  |  13
-rw-r--r--  arch/arm/mm/proc-xsc3.S      |  12
-rw-r--r--  arch/arm/mm/proc-xscale.S    |  12
26 files changed, 498 insertions, 103 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928ae4d..4414a01e1e8 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -779,6 +779,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e607..1fa6f71470d 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@
 #define CACHE_DLIMIT	(CACHE_DSIZE * 2)
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(fa_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(fa_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular address
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area)
 
 	.type	fa_cache_fns, #object
 ENTRY(fa_cache_fns)
+	.long	fa_flush_icache_all
 	.long	fa_flush_kern_cache_all
 	.long	fa_flush_user_cache_all
 	.long	fa_flush_user_cache_range
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0..170c9bb9586 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
+static uint32_t l2x0_size;
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
 {
 	unsigned long flags;
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	/* Invalidating when L2 is enabled is a nono */
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
 	__u32 cache_id;
+	__u32 way_size = 0;
 	int ways;
 	const char *type;
 
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	l2x0_way_mask = (1 << ways) - 1;
 
 	/*
+	 * L2 cache Size =  Way size * Number of ways
+	 */
+	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+	way_size = 1 << (way_size + 3);
+	l2x0_size = ways * way_size * SZ_1K;
+
+	/*
 	 * Check if l2x0 controller is already enabled.
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
+	outer_cache.flush_all = l2x0_flush_all;
+	outer_cache.inv_all = l2x0_inv_all;
+	outer_cache.disable = l2x0_disable;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
-			ways, cache_id, aux);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			ways, cache_id, aux, l2x0_size);
 }
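For reference, the way-size arithmetic introduced in l2x0_init() above is easy to check in isolation. A minimal userspace sketch, assuming the usual AUX_CTRL layout in which bits 19:17 hold the way-size field (the mask value below is an assumption consistent with the >> 17 shift used in the patch):

/*
 * Standalone model of the L2 size computation added to l2x0_init(),
 * plus the new range-operation short cut.  All register values are
 * invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_1K				1024
#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(0x7 << 17)	/* assumed: bits 19:17 */

int main(void)
{
	uint32_t aux = 2 << 17;		/* hypothetical AUX_CTRL, way-size field = 2 */
	int ways = 8;			/* e.g. an 8-way PL310 */

	uint32_t way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);			/* field 2 -> 32 KB per way */
	uint32_t l2x0_size = ways * way_size * SZ_1K;	/* 8 * 32 KB = 256 KB */

	printf("L2 size: %u KB\n", l2x0_size / SZ_1K);

	/*
	 * The new checks in l2x0_clean_range()/l2x0_flush_range(): a range
	 * covering the whole cache is serviced by one background operation
	 * on all ways instead of a line-by-line loop.
	 */
	unsigned long start = 0, end = 512 * SZ_1K;
	printf("%s\n", (end - start) >= l2x0_size ?
	       "use l2x0_clean_all()" : "clean line by line");
	return 0;
}

With a field value of 2 and 8 ways this prints a 256 KB cache, and the 512 KB range takes the new all-ways path.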
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fe..2e2bc406a18 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@
 #include "proc-macros.S"
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+	mov	pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area)
 
 	.type	v3_cache_fns, #object
 ENTRY(v3_cache_fns)
+	.long	v3_flush_icache_all
 	.long	v3_flush_kern_cache_all
 	.long	v3_flush_user_cache_all
 	.long	v3_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e81..a8fefb523f1 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@
 #include "proc-macros.S"
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4_flush_icache_all)
+	mov	pc, lr
+ENDPROC(v4_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area)
 
 	.type	v4_cache_fns, #object
 ENTRY(v4_cache_fns)
+	.long	v4_flush_icache_all
 	.long	v4_flush_kern_cache_all
 	.long	v4_flush_user_cache_all
 	.long	v4_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa10..d3644db467b 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -51,6 +51,17 @@ flush_base:
 	.text
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular address
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area)
 
 	.type	v4wb_cache_fns, #object
 ENTRY(v4wb_cache_fns)
+	.long	v4wb_flush_icache_all
 	.long	v4wb_flush_kern_cache_all
 	.long	v4wb_flush_user_cache_all
 	.long	v4wb_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43..49c2b66cf3d 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -41,6 +41,17 @@
 #define CACHE_DLIMIT	16384
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wt_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(v4wt_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area)
 
 	.type	v4wt_cache_fns, #object
 ENTRY(v4wt_cache_fns)
+	.long	v4wt_flush_icache_all
 	.long	v4wt_flush_kern_cache_all
 	.long	v4wt_flush_user_cache_all
 	.long	v4wt_flush_user_cache_range
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e85..ac6a36142fc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * fragmentation of the DMA space, and also prevents allocations
 	 * smaller than a section from crossing a section boundary.
 	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8440d952ba6..83e59f87042 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here.  Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+	/*
+	 * Use nested version here to indicate that we are already
+	 * holding one similar spinlock.
+	 */
+	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+	spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
 {
@@ -89,13 +113,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * open-code the spin-locking.
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
-	pte = pte_offset_map_nested(pmd, address);
-	spin_lock(ptl);
+	pte = pte_offset_map(pmd, address);
+	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
-	spin_unlock(ptl);
-	pte_unmap_nested(pte);
+	do_pte_unlock(ptl);
+	pte_unmap(pte);
 
 	return ret;
 }
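A note on the do_pte_lock()/do_pte_unlock() helpers added above: adjust_pte() runs with the faulting page table's PTL already held, and with split PTE locks it must take a second, distinct lock of the same lock class for the aliasing VMA, which trips lockdep unless annotated with SINGLE_DEPTH_NESTING. A userspace analogy with pthreads (the kernel identifiers in the comments are real; the two-mutex setup is purely illustrative):

/* Two locks stand in for the two page-table locks of the same class. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl_a = PTHREAD_MUTEX_INITIALIZER; /* PTL of the faulting PTE */
static pthread_mutex_t ptl_b = PTHREAD_MUTEX_INITIALIZER; /* PTL of the aliasing VMA */

int main(void)
{
	pthread_mutex_lock(&ptl_a);	/* taken by adjust_pte()'s caller */

	/*
	 * With USE_SPLIT_PTLOCKS these are different locks, so nesting is
	 * safe (and spin_lock_nested() tells lockdep so).  Without split
	 * locks both would be mm->page_table_lock, which is why the
	 * !USE_SPLIT_PTLOCKS variants of do_pte_lock()/do_pte_unlock()
	 * in the patch are empty.
	 */
	pthread_mutex_lock(&ptl_b);
	puts("both page-table locks held; PTE can be adjusted");
	pthread_mutex_unlock(&ptl_b);

	pthread_mutex_unlock(&ptl_a);
	return 0;
}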
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1..c435fd9e1da 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
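The highmem.c conversion above replaces the caller-supplied 'enum km_type' with a per-CPU stack of fixmap slot indices. A minimal userspace model of that stack (kmap_atomic_idx_push()/kmap_atomic_idx()/kmap_atomic_idx_pop() are the real kernel helper names; the single global counter here stands in for the per-CPU variable, and the slot count is illustrative):

#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 16			/* slots per CPU; illustrative value */

static int kmap_idx;			/* per-CPU in the kernel */

static int kmap_atomic_idx_push(void)
{
	assert(kmap_idx < KM_TYPE_NR);	/* too many nested mappings? */
	return kmap_idx++;
}

static int kmap_atomic_idx(void)  { return kmap_idx - 1; }
static void kmap_atomic_idx_pop(void) { assert(kmap_idx > 0); kmap_idx--; }

int main(void)
{
	int outer = kmap_atomic_idx_push();	/* first mapping: slot 0 */
	int inner = kmap_atomic_idx_push();	/* nested mapping: slot 1 */

	printf("outer=%d inner=%d\n", outer, inner);

	/* unmap strictly in reverse order, as kunmap_atomic() requires */
	assert(kmap_atomic_idx() == inner);
	kmap_atomic_idx_pop();
	assert(kmap_atomic_idx() == outer);
	kmap_atomic_idx_pop();
	return 0;
}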
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7fd9b5eb177..5164069ced4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -121,9 +122,10 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_limits(struct meminfo *mi,
-	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+	unsigned long *max_high)
 {
+	struct meminfo *mi = &meminfo;
 	int i;
 
 	*min = -1UL;
@@ -147,14 +149,13 @@ static void __init find_limits(struct meminfo *mi,
 	}
 }
 
-static void __init arm_bootmem_init(struct meminfo *mi,
-	unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+	unsigned long end_pfn)
 {
 	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
-	int i;
 
 	/*
 	 * Allocate the bootmem bitmap page.  This must be in a region
@@ -172,30 +173,39 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	pgdat = NODE_DATA(0);
 	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-	for_each_bank(i, mi) {
-		struct membank *bank = &mi->bank[i];
-		if (!bank->highmem)
-			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+	/* Free the lowmem regions from memblock into bootmem. */
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
 	}
 
-	/*
-	 * Reserve the memblock reserved regions in bootmem.
-	 */
+	/* Reserve the lowmem memblock reserved regions in bootmem. */
 	for_each_memblock(reserved, reg) {
-		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
-		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
-		if (start >= start_pfn && end <= end_pfn)
-			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-					     (end - start) << PAGE_SHIFT,
-					     BOOTMEM_DEFAULT);
+		unsigned long start = memblock_region_reserved_base_pfn(reg);
+		unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		reserve_bootmem(__pfn_to_phys(start),
+				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
 	}
 }
 
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
-	unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	int i;
+	struct memblock_region *reg;
 
 	/*
 	 * initialise the zones.
@@ -217,13 +227,20 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 	 *  holes = node_size - sum(bank_sizes)
 	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_bank(i, mi) {
-		int idx = 0;
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
 #ifdef CONFIG_HIGHMEM
-		if (mi->bank[i].highmem)
-			idx = ZONE_HIGHMEM;
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
 #endif
-		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
 	}
 
 	/*
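The zhole_size accounting rewritten in arm_bootmem_free() above derives zone holes by subtracting each memblock region's lowmem and highmem portions from the zone span. A worked example with invented PFNs, lowmem zone only:

#include <stdio.h>

int main(void)
{
	/* one zone spanning PFNs [min, max_low) */
	unsigned long min_pfn = 0x60000, max_low = 0x70000;
	unsigned long zone_size = max_low - min_pfn;
	unsigned long zhole_size = zone_size;	/* assume all hole to start */

	/* two hypothetical memory regions with a gap between them */
	unsigned long regions[][2] = {
		{ 0x60000, 0x64000 },
		{ 0x68000, 0x70000 },
	};

	for (int i = 0; i < 2; i++) {
		unsigned long start = regions[i][0], end = regions[i][1];

		/* subtract the lowmem portion, as the patch does */
		if (start < max_low) {
			unsigned long low_end = end < max_low ? end : max_low;
			zhole_size -= low_end - start;
		}
	}

	/* only the 0x64000-0x68000 gap remains as a hole */
	printf("hole: 0x%lx pages\n", zhole_size);	/* prints 0x4000 */
	return 0;
}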
@@ -256,10 +273,19 @@ static void arm_memory_present(void)
 }
 #endif
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
 	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -292,14 +318,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
 void __init bootmem_init(void)
 {
-	struct meminfo *mi = &meminfo;
 	unsigned long min, max_low, max_high;
 
 	max_low = max_high = 0;
 
-	find_limits(mi, &min, &max_low, &max_high);
+	find_limits(&min, &max_low, &max_high);
 
-	arm_bootmem_init(mi, min, max_low);
+	arm_bootmem_init(min, max_low);
 
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(),
@@ -317,7 +342,7 @@ void __init bootmem_init(void)
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	arm_bootmem_free(mi, min, max_low, max_high);
+	arm_bootmem_free(min, max_low, max_high);
 
 	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
@@ -411,6 +436,56 @@ static void __init free_unused_memmap(struct meminfo *mi)
 	}
 }
 
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				totalhigh_pages += free_area(start, res_start,
+							     NULL);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			totalhigh_pages += free_area(start, end, NULL);
+	}
+	totalram_pages += totalhigh_pages;
+#endif
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -419,6 +494,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
 void __init mem_init(void)
 {
 	unsigned long reserved_pages, free_pages;
+	struct memblock_region *reg;
 	int i;
 #ifdef CONFIG_HAVE_TCM
 	/* These pointers are filled in on TCM detection */
@@ -439,16 +515,7 @@ void __init mem_init(void)
 		    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
-#ifdef CONFIG_HIGHMEM
-	/* set highmem page free */
-	for_each_bank (i, &meminfo) {
-		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-			totalhigh_pages += free_area(start, end, NULL);
-	}
-	totalram_pages += totalhigh_pages;
-#endif
+	free_highpages();
 
 	reserved_pages = free_pages = 0;
 
@@ -478,9 +545,11 @@ void __init mem_init(void)
 	 */
 	printk(KERN_INFO "Memory:");
 	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += bank_pfn_size(&meminfo.bank[i]);
-		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+	for_each_memblock(memory, reg) {
+		unsigned long pages = memblock_region_memory_end_pfn(reg) -
+			memblock_region_memory_base_pfn(reg);
+		num_physpages += pages;
+		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
 	}
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
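The Memory: printout at the end of mem_init() above now works in pages rather than bytes, so megabytes become pages >> (20 - PAGE_SHIFT) instead of bytes >> 20. A quick sanity check with 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KB pages */

int main(void)
{
	unsigned long pages = 0x8000;	/* 32768 pages * 4 KB = 128 MB */

	printf("%luMB\n", pages >> (20 - PAGE_SHIFT));	/* prints 128 */
	return 0;
}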
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c32f731d56d..72ad3e1f56c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 #include <linux/fs.h>
 
 #include <asm/cputype.h>
@@ -265,17 +264,17 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
-				  L_PTE_DIRTY | L_PTE_WRITE,
-		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
-		.domain		= DOMAIN_KERNEL,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_WRITE,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+				L_PTE_WRITE | L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_IO,
+		.domain    = DOMAIN_KERNEL,
 	},
 };
 
@@ -745,13 +744,14 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
 
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+	lowmem_limit = __pa(vmalloc_min - 1) + 1;
+	memblock_set_current_limit(lowmem_limit);
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
 static inline void prepare_page_table(void)
 {
 	unsigned long addr;
+	phys_addr_t end;
 
 	/*
 	 * Clear out all the mappings below the kernel image.
@@ -867,10 +868,17 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
+	 * Find the end of the first block of lowmem.
+	 */
+	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+	if (end >= lowmem_limit)
+		end = lowmem_limit;
+
+	/*
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+	for (addr = __phys_to_virt(end);
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -987,37 +995,28 @@ static void __init kmap_init(void)
 #endif
 }
 
-static inline void map_memory_bank(struct membank *bank)
-{
-	struct map_desc map;
-
-	map.pfn = bank_pfn_start(bank);
-	map.virtual = __phys_to_virt(bank_phys_start(bank));
-	map.length = bank_phys_size(bank);
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
-}
-
 static void __init map_lowmem(void)
 {
-	struct meminfo *mi = &meminfo;
-	int i;
+	struct memblock_region *reg;
 
 	/* Map all the lowmem memory banks. */
-	for (i = 0; i < mi->nr_banks; i++) {
-		struct membank *bank = &mi->bank[i];
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > lowmem_limit)
+			end = lowmem_limit;
+		if (start >= end)
+			break;
 
-		if (!bank->highmem)
-			map_memory_bank(bank);
-	}
-}
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+		create_mapping(&map);
+	}
 }
 
 /*
@@ -1028,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
 	build_mem_type_table();
 	sanity_check_meminfo();
 	prepare_page_table();
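map_lowmem() above now clamps each memblock region to lowmem_limit before handing it to create_mapping(). A standalone sketch of just the clamping, with invented addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t lowmem_limit = 0x30000000;	/* e.g. __pa(vmalloc_min - 1) + 1 */
	uint64_t base = 0x20000000, size = 0x20000000;	/* a 512 MB bank */

	uint64_t start = base, end = base + size;

	if (end > lowmem_limit)
		end = lowmem_limit;	/* highmem tail is not mapped here */
	if (start >= end)
		return 0;		/* region is entirely highmem */

	/* only 0x20000000-0x30000000 is mapped as MT_MEMORY */
	printf("map %#llx-%#llx as MT_MEMORY\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}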
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153b..69bbfc6645a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 			goto no_pte;
 
 		init_pmd = pmd_offset(init_pgd, 0);
-		init_pte = pte_offset_map_nested(init_pmd, 0);
+		init_pte = pte_offset_map(init_pmd, 0);
 		set_pte_ext(new_pte, *init_pte, 0);
-		pte_unmap_nested(init_pte);
+		pte_unmap(init_pte);
 		pte_unmap(new_pte);
 	}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index a6f5f8475b9..bcf748d9f4e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1020_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area)
 ENDPROC(arm1020_dma_unmap_area)
 
 ENTRY(arm1020_cache_fns)
+	.long	arm1020_flush_icache_all
 	.long	arm1020_flush_kern_cache_all
 	.long	arm1020_flush_user_cache_all
 	.long	arm1020_flush_user_cache_range
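The flush_icache_all implementation added in proc-arm1020.S above, and repeated in the remaining proc-*.S files below, always reduces to the same CP15 operation: write zero to c7, c5, 0 to invalidate the entire I-cache. The kernel's __flush_icache_all() helper in arch/arm/include/asm/cacheflush.h emits the same instruction from C. A compilable sketch of that pattern (needs an ARM toolchain and must execute in a privileged mode; in userspace the coprocessor access would trap):

/* C equivalent of "mcr p15, 0, r0, c7, c5, 0" with r0 == 0 */
static inline void flush_icache_all(void)
{
	unsigned long zero = 0;

	asm volatile("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"
		     : : "r" (zero) : "memory");
}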
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index afc06b9c313..ab7ec26657e 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020e_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1020e_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area)
 ENDPROC(arm1020e_dma_unmap_area)
 
 ENTRY(arm1020e_cache_fns)
+	.long	arm1020e_flush_icache_all
 	.long	arm1020e_flush_kern_cache_all
 	.long	arm1020e_flush_user_cache_all
 	.long	arm1020e_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8915e0ba3fe..831c5e54e22 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1022_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1022_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area)
 ENDPROC(arm1022_dma_unmap_area)
 
 ENTRY(arm1022_cache_fns)
+	.long	arm1022_flush_icache_all
 	.long	arm1022_flush_kern_cache_all
 	.long	arm1022_flush_user_cache_all
 	.long	arm1022_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ff446c5d476..e3f7e9a166b 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1026_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1026_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area)
 ENDPROC(arm1026_dma_unmap_area)
 
 ENTRY(arm1026_cache_fns)
+	.long	arm1026_flush_icache_all
 	.long	arm1026_flush_kern_cache_all
 	.long	arm1026_flush_user_cache_all
 	.long	arm1026_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index fecf570939f..6109f278a90 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm920_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm920_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area)
 ENDPROC(arm920_dma_unmap_area)
 
 ENTRY(arm920_cache_fns)
+	.long	arm920_flush_icache_all
 	.long	arm920_flush_kern_cache_all
 	.long	arm920_flush_user_cache_all
 	.long	arm920_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index e3cbf87c948..bb2f0f46a5e 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm922_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm922_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area)
 ENDPROC(arm922_dma_unmap_area)
 
 ENTRY(arm922_cache_fns)
+	.long	arm922_flush_icache_all
 	.long	arm922_flush_kern_cache_all
 	.long	arm922_flush_user_cache_all
 	.long	arm922_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 572424c867b..c13e01accfe 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm925_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm925_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area)
 ENDPROC(arm925_dma_unmap_area)
 
 ENTRY(arm925_cache_fns)
+	.long	arm925_flush_icache_all
 	.long	arm925_flush_kern_cache_all
 	.long	arm925_flush_user_cache_all
 	.long	arm925_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 63d168b4ebe..42eb4315740 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm926_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm926_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area)
 ENDPROC(arm926_dma_unmap_area)
 
 ENTRY(arm926_cache_fns)
+	.long	arm926_flush_icache_all
 	.long	arm926_flush_kern_cache_all
 	.long	arm926_flush_user_cache_all
 	.long	arm926_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index f6a62822418..7b11cdb9935 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm940_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm940_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  */
 ENTRY(arm940_flush_user_cache_all)
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area)
 ENDPROC(arm940_dma_unmap_area)
 
 ENTRY(arm940_cache_fns)
+	.long	arm940_flush_icache_all
 	.long	arm940_flush_kern_cache_all
 	.long	arm940_flush_user_cache_all
 	.long	arm940_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index ea2e7f2eb95..1a5bbf08034 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm946_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm946_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  */
 ENTRY(arm946_flush_user_cache_all)
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area)
 ENDPROC(arm946_dma_unmap_area)
 
 ENTRY(arm946_cache_fns)
+	.long	arm946_flush_icache_all
 	.long	arm946_flush_kern_cache_all
 	.long	arm946_flush_user_cache_all
 	.long	arm946_flush_user_cache_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 578da69200c..b4597edbff9 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(feroceon_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(feroceon_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area)
 ENDPROC(feroceon_dma_unmap_area)
 
 ENTRY(feroceon_cache_fns)
+	.long	feroceon_flush_icache_all
 	.long	feroceon_flush_kern_cache_all
 	.long	feroceon_flush_user_cache_all
 	.long	feroceon_flush_user_cache_range
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_dma_flush_range
 
 ENTRY(feroceon_range_cache_fns)
+	.long	feroceon_flush_icache_all
 	.long	feroceon_flush_kern_cache_all
 	.long	feroceon_flush_user_cache_all
 	.long	feroceon_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index cad07e40304..ec26355cb7c 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle)
 /* ================================= CACHE ================================ */
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xsc3_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(xsc3_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area)
 ENDPROC(xsc3_dma_unmap_area)
 
 ENTRY(xsc3_cache_fns)
+	.long	xsc3_flush_icache_all
 	.long	xsc3_flush_kern_cache_all
 	.long	xsc3_flush_user_cache_all
 	.long	xsc3_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index cb245edb2c2..523408c0bb3 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle)
 /* ================================= CACHE ================================ */
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xscale_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(xscale_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
 *	Invalidate all cache entries in a particular address
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area)
 ENDPROC(xscale_dma_unmap_area)
 
 ENTRY(xscale_cache_fns)
+	.long	xscale_flush_icache_all
 	.long	xscale_flush_kern_cache_all
 	.long	xscale_flush_user_cache_all
 	.long	xscale_flush_user_cache_range
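Finally, the reason every *_cache_fns table in this series gains ".long xxx_flush_icache_all" as its first entry: the assembly tables must mirror struct cpu_cache_fns, which this series extends with a flush_icache_all() hook at the top. An abbreviated sketch of the structure (field order per the tables above; the trailing members are elided, see arch/arm/include/asm/cacheflush.h for the full definition):

struct cpu_cache_fns {
	void (*flush_icache_all)(void);		/* new in this series */
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
	/* ... coherent ranges, flush_kern_dcache_area, dma_map_area,
	 *     dma_unmap_area, dma_flush_range ... */
};

Because the multi-CPU kernel indexes these tables through the structure, an entry added anywhere but in lockstep with the struct layout would misroute every later cache call, which is why each of the 26 files adds the same line in the same position.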