Diffstat (limited to 'arch/mn10300/mm')
30 files changed, 3162 insertions, 701 deletions
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache new file mode 100644 index 00000000000..bfbe52691f2 --- /dev/null +++ b/arch/mn10300/mm/Kconfig.cache @@ -0,0 +1,147 @@ +# +# MN10300 CPU cache options +# + +choice + prompt "CPU Caching mode" + default MN10300_CACHE_WBACK + help + This option determines the caching mode for the kernel. + + Write-Back caching mode involves all reads and writes causing + the affected cacheline to be read into the cache first before being + operated upon. Memory is not then updated by a write until the cache + is filled and a cacheline needs to be displaced from the cache to + make room. Only at that point is it written back. + + Write-Through caching only fetches cachelines from memory on a + read. Writes always get written directly to memory. If the affected + cacheline is also in the cache, it will be updated too. + + The final option is to turn off caching entirely. + +config MN10300_CACHE_WBACK + bool "Write-Back" + help + The dcache operates in delayed write-back mode. It must be manually + flushed if data is written that subsequently needs to be executed or + to be DMA'd by a device. + +config MN10300_CACHE_WTHRU + bool "Write-Through" + help + The dcache operates in immediate write-through mode. Writes are + committed to RAM immediately in addition to being stored in the + cache. This means that the written data is immediately available for + execution or DMA. + + This is not available for use with an SMP kernel unless cache + flushing and invalidation by automatic purge register is selected. + +config MN10300_CACHE_DISABLED + bool "Disabled" + help + The icache and dcache are disabled. + +endchoice + +config MN10300_CACHE_ENABLED + def_bool y if !MN10300_CACHE_DISABLED + + +choice + prompt "CPU cache flush/invalidate method" + default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2 + default MN10300_CACHE_MANAGE_BY_REG if AM34_2 + depends on MN10300_CACHE_ENABLED + help + This determines the method by which CPU cache flushing and + invalidation is performed. + +config MN10300_CACHE_MANAGE_BY_TAG + bool "Use the cache tag registers directly" + depends on !(SMP && MN10300_CACHE_WTHRU) + +config MN10300_CACHE_MANAGE_BY_REG + bool "Flush areas by way of automatic purge registers (AM34 only)" + depends on AM34_2 + +endchoice + +config MN10300_CACHE_INV_BY_TAG + def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED + +config MN10300_CACHE_INV_BY_REG + def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED + +config MN10300_CACHE_FLUSH_BY_TAG + def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK + +config MN10300_CACHE_FLUSH_BY_REG + def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK + + +config MN10300_HAS_CACHE_SNOOP + def_bool n + +config MN10300_CACHE_SNOOP + bool "Use CPU Cache Snooping" + depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP + default y + +config MN10300_CACHE_FLUSH_ICACHE + def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP + help + Set if we need the dcache to be flushed before the icache is + invalidated. + +config MN10300_CACHE_INV_ICACHE + def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP + help + Set if we need the icache to be invalidated, even if the dcache is in + write-through mode and doesn't need flushing.
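+# +# For example, when MN10300_CACHE_FLUSH_ICACHE is set, code that has just +# been written into memory cannot safely be executed until the affected +# dcache lines have been flushed back to RAM and the matching icache lines +# invalidated; flush_icache_range(), added by this patch, does both. A rough +# usage sketch (dst, code and len are illustrative names): +# +# memcpy(dst, code, len); +# flush_icache_range((unsigned long) dst, (unsigned long) dst + len); +# +# When MN10300_CACHE_INV_ICACHE is set instead, the dcache needs no flushing +# and only the icache invalidation is performed. +#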
+ +# +# The kernel debugger gets its own separate cache flushing functions +# +config MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG + def_bool y if KERNEL_DEBUGGER && \ + MN10300_CACHE_WBACK && \ + !MN10300_CACHE_SNOOP && \ + MN10300_CACHE_MANAGE_BY_TAG + help + Set if the debugger needs to flush the dcache and invalidate the + icache using the cache tag registers to make breakpoints work. + +config MN10300_DEBUGGER_CACHE_FLUSH_BY_REG + def_bool y if KERNEL_DEBUGGER && \ + MN10300_CACHE_WBACK && \ + !MN10300_CACHE_SNOOP && \ + MN10300_CACHE_MANAGE_BY_REG + help + Set if the debugger needs to flush the dcache and invalidate the + icache using automatic purge registers to make breakpoints work. + +config MN10300_DEBUGGER_CACHE_INV_BY_TAG + def_bool y if KERNEL_DEBUGGER && \ + MN10300_CACHE_WTHRU && \ + !MN10300_CACHE_SNOOP && \ + MN10300_CACHE_MANAGE_BY_TAG + help + Set if the debugger needs to invalidate the icache using the cache + tag registers to make breakpoints work. + +config MN10300_DEBUGGER_CACHE_INV_BY_REG + def_bool y if KERNEL_DEBUGGER && \ + MN10300_CACHE_WTHRU && \ + !MN10300_CACHE_SNOOP && \ + MN10300_CACHE_MANAGE_BY_REG + help + Set if the debugger needs to invalidate the icache using automatic + purge registers to make breakpoints work. + +config MN10300_DEBUGGER_CACHE_NO_FLUSH + def_bool y if KERNEL_DEBUGGER && \ + (MN10300_CACHE_DISABLED || MN10300_CACHE_SNOOP) + help + Set if the debugger does not need to flush the dcache and/or + invalidate the icache to make breakpoints work. diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile index 28b9d983db0..11f38466ac2 100644 --- a/arch/mn10300/mm/Makefile +++ b/arch/mn10300/mm/Makefile @@ -2,13 +2,30 @@ # Makefile for the MN10300-specific memory management code # +cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o + +cacheflush-y := cache.o +cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y) +cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o +cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o +cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o +cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o +cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o +cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o + +cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG) += \ + cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o +cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_REG) += \ + cache-dbg-flush-by-reg.o +cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG) += \ + cache-dbg-inv-by-tag.o cache-dbg-inv.o +cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_REG) += \ + cache-dbg-inv-by-reg.o cache-dbg-inv.o + +cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o + obj-y := \ init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ - misalignment.o dma-alloc.o - -ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) -obj-y += cache.o cache-mn10300.o -ifeq ($(CONFIG_MN10300_CACHE_WBACK),y) -obj-y += cache-flush-mn10300.o -endif -endif + misalignment.o dma-alloc.o $(cacheflush-y) + +obj-$(CONFIG_SMP) += tlb-smp.o diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S new file mode 100644 index 00000000000..a775ea5d7ce --- /dev/null +++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S @@ -0,0 +1,160 @@ +/* MN10300 CPU cache invalidation routines, using automatic purge registers + * + * Copyright (C) 2011 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + + .am33_2 + +############################################################################### +# +# void debugger_local_cache_flushinv(void) +# Flush the entire data cache back to RAM and invalidate the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv + .type debugger_local_cache_flushinv,@function +debugger_local_cache_flushinv: + # + # firstly flush the dcache + # + movhu (CHCTR),d0 + btst CHCTR_DCEN|CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_end + + mov DCPGCR,a0 + + mov epsw,d1 + and ~EPSW_IE,epsw + or EPSW_NMID,epsw + nop + + btst CHCTR_DCEN,d0 + beq debugger_local_cache_flushinv_no_dcache + + # wait for busy bit of area purge + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + # set mask + clr d0 + mov d0,(DCPGMR) + + # area purge + # + # DCPGCR = DCPGCR_DCP + # + mov DCPGCR_DCP,d0 + mov d0,(a0) + + # wait for busy bit of area purge + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + +debugger_local_cache_flushinv_no_dcache: + # + # secondly, invalidate the icache if it is enabled + # + mov CHCTR,a0 + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_done + + invalidate_icache 0 + +debugger_local_cache_flushinv_done: + mov d1,epsw + +debugger_local_cache_flushinv_end: + ret [],0 + .size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv + +############################################################################### +# +# void debugger_local_cache_flushinv_one(u8 *addr) +# +# Invalidate one particular cacheline if it's in the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv_one + .type debugger_local_cache_flushinv_one,@function +debugger_local_cache_flushinv_one: + movhu (CHCTR),d1 + btst CHCTR_DCEN|CHCTR_ICEN,d1 + beq debugger_local_cache_flushinv_one_end + btst CHCTR_DCEN,d1 + beq debugger_local_cache_flushinv_one_no_dcache + + # round cacheline addr down + and L1_CACHE_TAG_MASK,d0 + mov d0,a1 + mov d0,d1 + + # determine the dcache purge control reg address + mov DCACHE_PURGE(0,0),a0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 + + # retain valid entries in the cache + or L1_CACHE_TAG_VALID,d1 + + # conditionally purge this line in all ways + mov d1,(L1_CACHE_WAYDISP*0,a0) + +debugger_local_cache_flushinv_one_no_dcache: + # + # now try to flush the icache + # + mov CHCTR,a0 + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_one_end + + LOCAL_CLI_SAVE(d1) + + mov ICIVCR,a0 + + # wait for the invalidator to quiesce + setlb + mov (a0),d0 + btst ICIVCR_ICIVBSY,d0 + lne + + # set the mask + mov L1_CACHE_TAG_MASK,d0 + mov d0,(ICIVMR) + + # invalidate the cache line at the given address + or ICIVCR_ICI,a1 + mov a1,(a0) + + # wait for the invalidator to quiesce again + setlb + mov (a0),d0 + btst ICIVCR_ICIVBSY,d0 + lne + + LOCAL_IRQ_RESTORE(d1) + +debugger_local_cache_flushinv_one_end: + ret [],0 + .size 
debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one diff --git a/arch/mn10300/mm/cache-dbg-flush-by-tag.S b/arch/mn10300/mm/cache-dbg-flush-by-tag.S new file mode 100644 index 00000000000..bf56930e6e7 --- /dev/null +++ b/arch/mn10300/mm/cache-dbg-flush-by-tag.S @@ -0,0 +1,114 @@ +/* MN10300 CPU cache invalidation routines, using direct tag flushing + * + * Copyright (C) 2011 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + + .am33_2 + +############################################################################### +# +# void debugger_local_cache_flushinv(void) +# +# Flush the entire data cache back to RAM and invalidate the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv + .type debugger_local_cache_flushinv,@function +debugger_local_cache_flushinv: + # + # firstly flush the dcache + # + movhu (CHCTR),d0 + btst CHCTR_DCEN|CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_end + + btst CHCTR_DCEN,d0 + beq debugger_local_cache_flushinv_no_dcache + + # read the addresses tagged in the cache's tag RAM and attempt to flush + # those addresses specifically + # - we rely on the hardware to filter out invalid tag entry addresses + mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address + mov DCACHE_PURGE(0,0),a1 # dcache purge request address + mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,e0 # total number of entries + +mn10300_local_dcache_flush_loop: + mov (a0),d0 + and L1_CACHE_TAG_MASK,d0 + or L1_CACHE_TAG_VALID,d0 # retain valid entries in the + # cache + mov d0,(a1) # conditional purge + + add L1_CACHE_BYTES,a0 + add L1_CACHE_BYTES,a1 + add -1,e0 + bne mn10300_local_dcache_flush_loop + +debugger_local_cache_flushinv_no_dcache: + # + # secondly, invalidate the icache if it is enabled + # + mov CHCTR,a0 + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_end + + invalidate_icache 1 + +debugger_local_cache_flushinv_end: + ret [],0 + .size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv + +############################################################################### +# +# void debugger_local_cache_flushinv_one(u8 *addr) +# +# Invalidate one particular cacheline if it's in the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv_one + .type debugger_local_cache_flushinv_one,@function +debugger_local_cache_flushinv_one: + movhu (CHCTR),d1 + btst CHCTR_DCEN|CHCTR_ICEN,d1 + beq debugger_local_cache_flushinv_one_end + btst CHCTR_DCEN,d1 + beq debugger_local_cache_flushinv_one_icache + + # round cacheline addr down + and L1_CACHE_TAG_MASK,d0 + mov d0,a1 + + # determine the dcache purge control reg address + mov DCACHE_PURGE(0,0),a0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 + + # retain valid entries in the cache + or L1_CACHE_TAG_VALID,a1 + + # conditionally purge this line in all ways + mov a1,(L1_CACHE_WAYDISP*0,a0) + + # now go and do the icache + bra debugger_local_cache_flushinv_one_icache 
+ +debugger_local_cache_flushinv_one_end: + ret [],0 + .size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one diff --git a/arch/mn10300/mm/cache-dbg-inv-by-reg.S b/arch/mn10300/mm/cache-dbg-inv-by-reg.S new file mode 100644 index 00000000000..c4e6252941b --- /dev/null +++ b/arch/mn10300/mm/cache-dbg-inv-by-reg.S @@ -0,0 +1,69 @@ +/* MN10300 CPU cache invalidation routines, using automatic purge registers + * + * Copyright (C) 2011 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + + .am33_2 + + .globl debugger_local_cache_flushinv_one + +############################################################################### +# +# void debugger_local_cache_flushinv_one(u8 *addr) +# +# Invalidate one particular cacheline if it's in the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv_one + .type debugger_local_cache_flushinv_one,@function +debugger_local_cache_flushinv_one: + mov d0,a1 + + mov CHCTR,a0 + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq mn10300_local_icache_inv_range_reg_end + + LOCAL_CLI_SAVE(d1) + + mov ICIVCR,a0 + + # wait for the invalidator to quiesce + setlb + mov (a0),d0 + btst ICIVCR_ICIVBSY,d0 + lne + + # set the mask + mov ~L1_CACHE_TAG_MASK,d0 + mov d0,(ICIVMR) + + # invalidate the cache line at the given address + and ~L1_CACHE_TAG_MASK,a1 + or ICIVCR_ICI,a1 + mov a1,(a0) + + # wait for the invalidator to quiesce again + setlb + mov (a0),d0 + btst ICIVCR_ICIVBSY,d0 + lne + + LOCAL_IRQ_RESTORE(d1) + +mn10300_local_icache_inv_range_reg_end: + ret [],0 + .size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one diff --git a/arch/mn10300/mm/cache-dbg-inv-by-tag.S b/arch/mn10300/mm/cache-dbg-inv-by-tag.S new file mode 100644 index 00000000000..d8ec821e5f8 --- /dev/null +++ b/arch/mn10300/mm/cache-dbg-inv-by-tag.S @@ -0,0 +1,120 @@ +/* MN10300 CPU cache invalidation routines, using direct tag flushing + * + * Copyright (C) 2011 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + + .am33_2 + + .globl debugger_local_cache_flushinv_one_icache + +############################################################################### +# +# void debugger_local_cache_flushinv_one(u8 *addr) +# +# Invalidate one particular cacheline if it's in the icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv_one_icache + .type debugger_local_cache_flushinv_one_icache,@function +debugger_local_cache_flushinv_one_icache: + movm [d3,a2],(sp) + + mov CHCTR,a2 + movhu (a2),d0 + btst CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_one_icache_end + + mov d0,a1 + and L1_CACHE_TAG_MASK,a1 + + # read the tags from the tag RAM, and if they indicate a matching valid + # cache line then we invalidate that line + mov ICACHE_TAG(0,0),a0 + mov a1,d0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 # starting icache tag RAM + # access address + + and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base + or L1_CACHE_TAG_VALID,a1 + mov L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_VALID,d1 + + LOCAL_CLI_SAVE(d3) + + # disable the icache + movhu (a2),d0 + and ~CHCTR_ICEN,d0 + movhu d0,(a2) + + # and wait for it to calm down + setlb + movhu (a2),d0 + btst CHCTR_ICBUSY,d0 + lne + + # check all the way tags for this cache entry + mov (a0),d0 # read the tag in the way 0 slot + xor a1,d0 + and d1,d0 + beq debugger_local_icache_kill # jump if matched + + add L1_CACHE_WAYDISP,a0 + mov (a0),d0 # read the tag in the way 1 slot + xor a1,d0 + and d1,d0 + beq debugger_local_icache_kill # jump if matched + + add L1_CACHE_WAYDISP,a0 + mov (a0),d0 # read the tag in the way 2 slot + xor a1,d0 + and d1,d0 + beq debugger_local_icache_kill # jump if matched + + add L1_CACHE_WAYDISP,a0 + mov (a0),d0 # read the tag in the way 3 slot + xor a1,d0 + and d1,d0 + bne debugger_local_icache_finish # jump if not matched + +debugger_local_icache_kill: + mov d0,(a0) # kill the tag (D0 is 0 at this point) + +debugger_local_icache_finish: + # wait for the cache to finish what it's doing + setlb + movhu (a2),d0 + btst CHCTR_ICBUSY,d0 + lne + + # and reenable it + or CHCTR_ICEN,d0 + movhu d0,(a2) + movhu (a2),d0 + + # re-enable interrupts + LOCAL_IRQ_RESTORE(d3) + +debugger_local_cache_flushinv_one_icache_end: + ret [d3,a2],8 + .size debugger_local_cache_flushinv_one_icache,.-debugger_local_cache_flushinv_one_icache + +#ifdef CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG + .globl debugger_local_cache_flushinv_one + .type debugger_local_cache_flushinv_one,@function +debugger_local_cache_flushinv_one = debugger_local_cache_flushinv_one_icache +#endif diff --git a/arch/mn10300/mm/cache-dbg-inv.S b/arch/mn10300/mm/cache-dbg-inv.S new file mode 100644 index 00000000000..eba2d6dca06 --- /dev/null +++ b/arch/mn10300/mm/cache-dbg-inv.S @@ -0,0 +1,47 @@ +/* MN10300 CPU cache invalidation routines + * + * Copyright (C) 2011 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + + .am33_2 + + .globl debugger_local_cache_flushinv + +############################################################################### +# +# void debugger_local_cache_flushinv(void) +# +# Invalidate the entire icache +# +############################################################################### + ALIGN + .globl debugger_local_cache_flushinv + .type debugger_local_cache_flushinv,@function +debugger_local_cache_flushinv: + # + # we only need to invalidate the icache in this cache mode + # + mov CHCTR,a0 + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq debugger_local_cache_flushinv_end + + invalidate_icache 1 + +debugger_local_cache_flushinv_end: + ret [],0 + .size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c new file mode 100644 index 00000000000..f669ea42aba --- /dev/null +++ b/arch/mn10300/mm/cache-disabled.c @@ -0,0 +1,21 @@ +/* Handle the cache being disabled + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/mm.h> + +/* + * allow userspace to flush the instruction cache + */ +asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) +{ + if (end < start) + return -EINVAL; + return 0; +} diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S new file mode 100644 index 00000000000..1dcae021167 --- /dev/null +++ b/arch/mn10300/mm/cache-flush-by-reg.S @@ -0,0 +1,308 @@ +/* MN10300 CPU core caching routines, using indirect regs on cache controller + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> + + .am33_2 + +#ifndef CONFIG_SMP + .globl mn10300_dcache_flush + .globl mn10300_dcache_flush_page + .globl mn10300_dcache_flush_range + .globl mn10300_dcache_flush_range2 + .globl mn10300_dcache_flush_inv + .globl mn10300_dcache_flush_inv_page + .globl mn10300_dcache_flush_inv_range + .globl mn10300_dcache_flush_inv_range2 + +mn10300_dcache_flush = mn10300_local_dcache_flush +mn10300_dcache_flush_page = mn10300_local_dcache_flush_page +mn10300_dcache_flush_range = mn10300_local_dcache_flush_range +mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2 +mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv +mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page +mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range +mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2 + +#endif /* !CONFIG_SMP */ + +############################################################################### +# +# void mn10300_local_dcache_flush(void) +# Flush the entire data cache back to RAM +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush + .type mn10300_local_dcache_flush,@function +mn10300_local_dcache_flush: + movhu (CHCTR),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_flush_end + + mov DCPGCR,a0 + + LOCAL_CLI_SAVE(d1) + + # wait for busy bit of area purge + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + # set mask + clr d0 + mov d0,(DCPGMR) + + # area purge + # + # DCPGCR = DCPGCR_DCP + # + mov DCPGCR_DCP,d0 + mov d0,(a0) + + # wait for busy bit of area purge + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + LOCAL_IRQ_RESTORE(d1) + +mn10300_local_dcache_flush_end: + ret [],0 + .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush + +############################################################################### +# +# void mn10300_local_dcache_flush_page(unsigned long start) +# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size) +# Flush a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_page + .globl mn10300_local_dcache_flush_range + .globl mn10300_local_dcache_flush_range2 + .type mn10300_local_dcache_flush_page,@function + .type mn10300_local_dcache_flush_range,@function + .type mn10300_local_dcache_flush_range2,@function +mn10300_local_dcache_flush_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_flush_range2: + add d0,d1 +mn10300_local_dcache_flush_range: + movm [d2,d3,a2],(sp) + + movhu (CHCTR),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_flush_range_end + + # calculate alignsize + # + # alignsize = L1_CACHE_BYTES; + # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) + # alignsize <<= 1; + # d2 = alignsize; + # + mov L1_CACHE_BYTES,d2 + sub d0,d1,d3 + add -1,d3 + lsr L1_CACHE_SHIFT,d3 + beq 2f +1: + add d2,d2 + lsr 1,d3 + bne 1b +2: + mov d1,a1 # a1 = end + + LOCAL_CLI_SAVE(d3) + mov DCPGCR,a0 + + # wait for busy bit of area purge + setlb + mov (a0),d1 + btst DCPGCR_DCPGBSY,d1 + lne + + # determine the mask + mov d2,d1 + add -1,d1 + not d1 # d1 = mask = ~(alignsize-1) + mov d1,(DCPGMR) + + and d1,d0,a2 # a2 = mask & start + 
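+ # in C-like pseudocode, the loop below is roughly: + # + # do { + # DCPGCR = a2 | DCPGCR_DCP; /* a2 = mask & start */ + # while (DCPGCR & DCPGCR_DCPGBSY) + # ; + # a2 += alignsize; + # } while (a2 < end); + #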
+dcpgloop: + # area purge + mov a2,d0 + or DCPGCR_DCP,d0 + mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP + + # wait for busy bit of area purge + setlb + mov (a0),d1 + btst DCPGCR_DCPGBSY,d1 + lne + + # check purge of end address + add d2,a2 # a2 += alignsize + cmp a1,a2 # if (a2 < end) goto dcpgloop + bns dcpgloop + + LOCAL_IRQ_RESTORE(d3) + +mn10300_local_dcache_flush_range_end: + ret [d2,d3,a2],12 + + .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page + .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range + .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2 + +############################################################################### +# +# void mn10300_local_dcache_flush_inv(void) +# Flush the entire data cache and invalidate all entries +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_inv + .type mn10300_local_dcache_flush_inv,@function +mn10300_local_dcache_flush_inv: + movhu (CHCTR),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_flush_inv_end + + mov DCPGCR,a0 + + LOCAL_CLI_SAVE(d1) + + # wait for busy bit of area purge & invalidate + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + # set the mask to cover everything + clr d0 + mov d0,(DCPGMR) + + # area purge & invalidate + mov DCPGCR_DCP|DCPGCR_DCI,d0 + mov d0,(a0) + + # wait for busy bit of area purge & invalidate + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + LOCAL_IRQ_RESTORE(d1) + +mn10300_local_dcache_flush_inv_end: + ret [],0 + .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv + +############################################################################### +# +# void mn10300_local_dcache_flush_inv_page(unsigned long start) +# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size) +# Flush and invalidate a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_inv_page + .globl mn10300_local_dcache_flush_inv_range + .globl mn10300_local_dcache_flush_inv_range2 + .type mn10300_local_dcache_flush_inv_page,@function + .type mn10300_local_dcache_flush_inv_range,@function + .type mn10300_local_dcache_flush_inv_range2,@function +mn10300_local_dcache_flush_inv_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_flush_inv_range2: + add d0,d1 +mn10300_local_dcache_flush_inv_range: + movm [d2,d3,a2],(sp) + + movhu (CHCTR),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_flush_inv_range_end + + # calculate alignsize + # + # alignsize = L1_CACHE_BYTES; + # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1) + # alignsize <<= 1; + # d2 = alignsize + # + mov L1_CACHE_BYTES,d2 + sub d0,d1,d3 + add -1,d3 + lsr L1_CACHE_SHIFT,d3 + beq 2f +1: + add d2,d2 + lsr 1,d3 + bne 1b +2: + mov d1,a1 # a1 = end + + LOCAL_CLI_SAVE(d3) + mov DCPGCR,a0 + + # wait for busy bit of area purge & invalidate + setlb + mov (a0),d1 + btst DCPGCR_DCPGBSY,d1 + lne + + # set the mask + mov d2,d1 + add -1,d1 + not d1 # d1 = mask = ~(alignsize-1) + mov d1,(DCPGMR) + + and d1,d0,a2 # a2 = mask & start + +dcpgivloop: + # area purge & invalidate + mov a2,d0 + or DCPGCR_DCP|DCPGCR_DCI,d0 + mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI + + # wait for busy bit of area purge & invalidate + setlb + mov (a0),d1 + btst 
DCPGCR_DCPGBSY,d1 + lne + + # check purge & invalidate of end address + add d2,a2 # a2 += alignsize + cmp a1,a2 # if (a2 < end) goto dcpgivloop + bns dcpgivloop + + LOCAL_IRQ_RESTORE(d3) + +mn10300_local_dcache_flush_inv_range_end: + ret [d2,d3,a2],12 + .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page + .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range + .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2 diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S new file mode 100644 index 00000000000..1ddc0684924 --- /dev/null +++ b/arch/mn10300/mm/cache-flush-by-tag.S @@ -0,0 +1,250 @@ +/* MN10300 CPU core caching routines, using direct tag flushing + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> + + .am33_2 + +#ifndef CONFIG_SMP + .globl mn10300_dcache_flush + .globl mn10300_dcache_flush_page + .globl mn10300_dcache_flush_range + .globl mn10300_dcache_flush_range2 + .globl mn10300_dcache_flush_inv + .globl mn10300_dcache_flush_inv_page + .globl mn10300_dcache_flush_inv_range + .globl mn10300_dcache_flush_inv_range2 + +mn10300_dcache_flush = mn10300_local_dcache_flush +mn10300_dcache_flush_page = mn10300_local_dcache_flush_page +mn10300_dcache_flush_range = mn10300_local_dcache_flush_range +mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2 +mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv +mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page +mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range +mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2 + +#endif /* !CONFIG_SMP */ + +############################################################################### +# +# void mn10300_local_dcache_flush(void) +# Flush the entire data cache back to RAM +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush + .type mn10300_local_dcache_flush,@function +mn10300_local_dcache_flush: + movhu (CHCTR),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_flush_end + + # read the addresses tagged in the cache's tag RAM and attempt to flush + # those addresses specifically + # - we rely on the hardware to filter out invalid tag entry addresses + mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address + mov DCACHE_PURGE(0,0),a1 # dcache purge request address + mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries + +mn10300_local_dcache_flush_loop: + mov (a0),d0 + and L1_CACHE_TAG_MASK,d0 + or L1_CACHE_TAG_VALID,d0 # retain valid entries in the + # cache + mov d0,(a1) # conditional purge + + add L1_CACHE_BYTES,a0 + add L1_CACHE_BYTES,a1 + add -1,d1 + bne mn10300_local_dcache_flush_loop + +mn10300_local_dcache_flush_end: + ret [],0 + .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush + +############################################################################### +# +# void mn10300_local_dcache_flush_page(unsigned long start) +# void 
mn10300_local_dcache_flush_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size) +# Flush a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_page + .globl mn10300_local_dcache_flush_range + .globl mn10300_local_dcache_flush_range2 + .type mn10300_local_dcache_flush_page,@function + .type mn10300_local_dcache_flush_range,@function + .type mn10300_local_dcache_flush_range2,@function +mn10300_local_dcache_flush_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_flush_range2: + add d0,d1 +mn10300_local_dcache_flush_range: + movm [d2],(sp) + + movhu (CHCTR),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_flush_range_end + + sub d0,d1,a0 + cmp MN10300_DCACHE_FLUSH_BORDER,a0 + ble 1f + + movm (sp),[d2] + bra mn10300_local_dcache_flush +1: + + # round start addr down + and L1_CACHE_TAG_MASK,d0 + mov d0,a1 + + add L1_CACHE_BYTES,d1 # round end addr up + and L1_CACHE_TAG_MASK,d1 + + # write a request to flush all instances of an address from the cache + mov DCACHE_PURGE(0,0),a0 + mov a1,d0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 # starting dcache purge control + # reg address + + sub a1,d1 + lsr L1_CACHE_SHIFT,d1 # total number of entries to + # examine + + or L1_CACHE_TAG_VALID,a1 # retain valid entries in the + # cache + +mn10300_local_dcache_flush_range_loop: + mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line + # in all ways + + add L1_CACHE_BYTES,a0 + add L1_CACHE_BYTES,a1 + and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0 + add -1,d1 + bne mn10300_local_dcache_flush_range_loop + +mn10300_local_dcache_flush_range_end: + ret [d2],4 + + .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page + .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range + .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2 + +############################################################################### +# +# void mn10300_local_dcache_flush_inv(void) +# Flush the entire data cache and invalidate all entries +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_inv + .type mn10300_local_dcache_flush_inv,@function +mn10300_local_dcache_flush_inv: + movhu (CHCTR),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_flush_inv_end + + mov L1_CACHE_NENTRIES,d1 + clr a1 + +mn10300_local_dcache_flush_inv_loop: + mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge + mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge + mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge + mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge + + add L1_CACHE_BYTES,a1 + add -1,d1 + bne mn10300_local_dcache_flush_inv_loop + +mn10300_local_dcache_flush_inv_end: + ret [],0 + .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv + +############################################################################### +# +# void mn10300_local_dcache_flush_inv_page(unsigned long start) +# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size) +# Flush and invalidate a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_flush_inv_page + .globl 
mn10300_local_dcache_flush_inv_range + .globl mn10300_local_dcache_flush_inv_range2 + .type mn10300_local_dcache_flush_inv_page,@function + .type mn10300_local_dcache_flush_inv_range,@function + .type mn10300_local_dcache_flush_inv_range2,@function +mn10300_local_dcache_flush_inv_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_flush_inv_range2: + add d0,d1 +mn10300_local_dcache_flush_inv_range: + movm [d2],(sp) + + movhu (CHCTR),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_flush_inv_range_end + + sub d0,d1,a0 + cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0 + ble 1f + + movm (sp),[d2] + bra mn10300_local_dcache_flush_inv +1: + + and L1_CACHE_TAG_MASK,d0 # round start addr down + mov d0,a1 + + add L1_CACHE_BYTES,d1 # round end addr up + and L1_CACHE_TAG_MASK,d1 + + # write a request to flush and invalidate all instances of an address + # from the cache + mov DCACHE_PURGE(0,0),a0 + mov a1,d0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 # starting dcache purge control + # reg address + + sub a1,d1 + lsr L1_CACHE_SHIFT,d1 # total number of entries to + # examine + +mn10300_local_dcache_flush_inv_range_loop: + mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line + # in all ways + + add L1_CACHE_BYTES,a0 + add L1_CACHE_BYTES,a1 + and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0 + add -1,d1 + bne mn10300_local_dcache_flush_inv_range_loop + +mn10300_local_dcache_flush_inv_range_end: + ret [d2],4 + .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page + .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range + .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2 diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c new file mode 100644 index 00000000000..fdb1a9db20f --- /dev/null +++ b/arch/mn10300/mm/cache-flush-icache.c @@ -0,0 +1,155 @@ +/* Flush dcache and invalidate icache when the dcache is in writeback mode + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/module.h> +#include <linux/mm.h> +#include <asm/cacheflush.h> +#include <asm/smp.h> +#include "cache-smp.h" + +/** + * flush_icache_page - Flush a page from the dcache and invalidate the icache + * @vma: The VMA the page is part of. + * @page: The page to be flushed. + * + * Write a page back from the dcache and invalidate the icache so that we can + * run code from it that we've just written into it. + */ +void flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + unsigned long start = page_to_phys(page); + unsigned long flags; + + flags = smp_lock_cache(); + + mn10300_local_dcache_flush_page(start); + mn10300_local_icache_inv_page(start); + + smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE); + smp_unlock_cache(flags); +} +EXPORT_SYMBOL(flush_icache_page); + +/** + * flush_icache_page_range - Flush dcache and invalidate icache for part of a + * single page + * @start: The starting virtual address of the page part. + * @end: The ending virtual address of the page part. + * + * Flush the dcache and invalidate the icache for part of a single page, as + * determined by the virtual addresses given.
The page must be in the paged + * area. + */ +static void flush_icache_page_range(unsigned long start, unsigned long end) +{ + unsigned long addr, size, off; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ppte, pte; + + /* work out how much of the page to flush */ + off = start & ~PAGE_MASK; + size = end - start; + + /* get the physical address the page is mapped to from the page + * tables */ + pgd = pgd_offset(current->mm, start); + if (!pgd || !pgd_val(*pgd)) + return; + + pud = pud_offset(pgd, start); + if (!pud || !pud_val(*pud)) + return; + + pmd = pmd_offset(pud, start); + if (!pmd || !pmd_val(*pmd)) + return; + + ppte = pte_offset_map(pmd, start); + if (!ppte) + return; + pte = *ppte; + pte_unmap(ppte); + + if (pte_none(pte)) + return; + + page = pte_page(pte); + if (!page) + return; + + addr = page_to_phys(page); + + /* flush the dcache and invalidate the icache coverage on that + * region */ + mn10300_local_dcache_flush_range2(addr + off, size); + mn10300_local_icache_inv_range2(addr + off, size); + smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end); +} + +/** + * flush_icache_range - Globally flush dcache and invalidate icache for region + * @start: The starting virtual address of the region. + * @end: The ending virtual address of the region. + * + * This is used by the kernel to globally flush some code it has just written + * from the dcache back to RAM and then to globally invalidate the icache over + * that region so that that code can be run on all CPUs in the system. + */ +void flush_icache_range(unsigned long start, unsigned long end) +{ + unsigned long start_page, end_page; + unsigned long flags; + + flags = smp_lock_cache(); + + if (end > 0x80000000UL) { + /* addresses above 0xa0000000 do not go through the cache */ + if (end > 0xa0000000UL) { + end = 0xa0000000UL; + if (start >= end) + goto done; + } + + /* kernel addresses between 0x80000000 and 0x9fffffff do not + * require page tables, so we just map such addresses + * directly */ + start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; + mn10300_local_dcache_flush_range(start_page, end); + mn10300_local_icache_inv_range(start_page, end); + smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end); + if (start_page == start) + goto done; + end = start_page; + } + + start_page = start & PAGE_MASK; + end_page = (end - 1) & PAGE_MASK; + + if (start_page == end_page) { + /* the first and last bytes are on the same page */ + flush_icache_page_range(start, end); + } else if (start_page + 1 == end_page) { + /* split over two virtually contiguous pages */ + flush_icache_page_range(start, end_page); + flush_icache_page_range(end_page, end); + } else { + /* more than 2 pages; just flush the entire cache */ + mn10300_dcache_flush(); + mn10300_icache_inv(); + smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0); + } + +done: + smp_unlock_cache(flags); +} +EXPORT_SYMBOL(flush_icache_range); diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S deleted file mode 100644 index c8ed1cbac10..00000000000 --- a/arch/mn10300/mm/cache-flush-mn10300.S +++ /dev/null @@ -1,192 +0,0 @@ -/* MN10300 CPU core caching routines - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#include <linux/sys.h> -#include <linux/linkage.h> -#include <asm/smp.h> -#include <asm/page.h> -#include <asm/cache.h> - - .am33_2 - .globl mn10300_dcache_flush - .globl mn10300_dcache_flush_page - .globl mn10300_dcache_flush_range - .globl mn10300_dcache_flush_range2 - .globl mn10300_dcache_flush_inv - .globl mn10300_dcache_flush_inv_page - .globl mn10300_dcache_flush_inv_range - .globl mn10300_dcache_flush_inv_range2 - -############################################################################### -# -# void mn10300_dcache_flush(void) -# Flush the entire data cache back to RAM -# -############################################################################### - ALIGN -mn10300_dcache_flush: - movhu (CHCTR),d0 - btst CHCTR_DCEN,d0 - beq mn10300_dcache_flush_end - - # read the addresses tagged in the cache's tag RAM and attempt to flush - # those addresses specifically - # - we rely on the hardware to filter out invalid tag entry addresses - mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address - mov DCACHE_PURGE(0,0),a1 # dcache purge request address - mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries - -mn10300_dcache_flush_loop: - mov (a0),d0 - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 - or L1_CACHE_TAG_VALID,d0 # retain valid entries in the - # cache - mov d0,(a1) # conditional purge - -mn10300_dcache_flush_skip: - add L1_CACHE_BYTES,a0 - add L1_CACHE_BYTES,a1 - add -1,d1 - bne mn10300_dcache_flush_loop - -mn10300_dcache_flush_end: - ret [],0 - -############################################################################### -# -# void mn10300_dcache_flush_page(unsigned start) -# void mn10300_dcache_flush_range(unsigned start, unsigned end) -# void mn10300_dcache_flush_range2(unsigned start, unsigned size) -# Flush a range of addresses on a page in the dcache -# -############################################################################### - ALIGN -mn10300_dcache_flush_page: - mov PAGE_SIZE,d1 -mn10300_dcache_flush_range2: - add d0,d1 -mn10300_dcache_flush_range: - movm [d2,d3],(sp) - - movhu (CHCTR),d2 - btst CHCTR_DCEN,d2 - beq mn10300_dcache_flush_range_end - - # round start addr down - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 - mov d0,a1 - - add L1_CACHE_BYTES,d1 # round end addr up - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 - - # write a request to flush all instances of an address from the cache - mov DCACHE_PURGE(0,0),a0 - mov a1,d0 - and L1_CACHE_TAG_ENTRY,d0 - add d0,a0 # starting dcache purge control - # reg address - - sub a1,d1 - lsr L1_CACHE_SHIFT,d1 # total number of entries to - # examine - - or L1_CACHE_TAG_VALID,a1 # retain valid entries in the - # cache - -mn10300_dcache_flush_range_loop: - mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line - # all ways - - add L1_CACHE_BYTES,a0 - add L1_CACHE_BYTES,a1 - and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0 - add -1,d1 - bne mn10300_dcache_flush_range_loop - -mn10300_dcache_flush_range_end: - ret [d2,d3],8 - -############################################################################### -# -# void mn10300_dcache_flush_inv(void) -# Flush the entire data cache and invalidate all entries -# 
-############################################################################### - ALIGN -mn10300_dcache_flush_inv: - movhu (CHCTR),d0 - btst CHCTR_DCEN,d0 - beq mn10300_dcache_flush_inv_end - - # hit each line in the dcache with an unconditional purge - mov DCACHE_PURGE(0,0),a1 # dcache purge request address - mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries - -mn10300_dcache_flush_inv_loop: - mov (a1),d0 # unconditional purge - - add L1_CACHE_BYTES,a1 - add -1,d1 - bne mn10300_dcache_flush_inv_loop - -mn10300_dcache_flush_inv_end: - ret [],0 - -############################################################################### -# -# void mn10300_dcache_flush_inv_page(unsigned start) -# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end) -# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size) -# Flush and invalidate a range of addresses on a page in the dcache -# -############################################################################### - ALIGN -mn10300_dcache_flush_inv_page: - mov PAGE_SIZE,d1 -mn10300_dcache_flush_inv_range2: - add d0,d1 -mn10300_dcache_flush_inv_range: - movm [d2,d3],(sp) - movhu (CHCTR),d2 - btst CHCTR_DCEN,d2 - beq mn10300_dcache_flush_inv_range_end - - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start - # addr down - mov d0,a1 - - add L1_CACHE_BYTES,d1 # round end addr up - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 - - # write a request to flush and invalidate all instances of an address - # from the cache - mov DCACHE_PURGE(0,0),a0 - mov a1,d0 - and L1_CACHE_TAG_ENTRY,d0 - add d0,a0 # starting dcache purge control - # reg address - - sub a1,d1 - lsr L1_CACHE_SHIFT,d1 # total number of entries to - # examine - -mn10300_dcache_flush_inv_range_loop: - mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line - # in all ways - - add L1_CACHE_BYTES,a0 - add L1_CACHE_BYTES,a1 - and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0 - add -1,d1 - bne mn10300_dcache_flush_inv_range_loop - -mn10300_dcache_flush_inv_range_end: - ret [d2,d3],8 diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S new file mode 100644 index 00000000000..a60825b91e7 --- /dev/null +++ b/arch/mn10300/mm/cache-inv-by-reg.S @@ -0,0 +1,350 @@ +/* MN10300 CPU cache invalidation routines, using automatic purge registers + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + +#define mn10300_local_dcache_inv_range_intr_interval \ + +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1) + +#if mn10300_local_dcache_inv_range_intr_interval > 0xff +#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less +#endif + + .am33_2 + +#ifndef CONFIG_SMP + .globl mn10300_icache_inv + .globl mn10300_icache_inv_page + .globl mn10300_icache_inv_range + .globl mn10300_icache_inv_range2 + .globl mn10300_dcache_inv + .globl mn10300_dcache_inv_page + .globl mn10300_dcache_inv_range + .globl mn10300_dcache_inv_range2 + +mn10300_icache_inv = mn10300_local_icache_inv +mn10300_icache_inv_page = mn10300_local_icache_inv_page +mn10300_icache_inv_range = mn10300_local_icache_inv_range +mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2 +mn10300_dcache_inv = mn10300_local_dcache_inv +mn10300_dcache_inv_page = mn10300_local_dcache_inv_page +mn10300_dcache_inv_range = mn10300_local_dcache_inv_range +mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2 + +#endif /* !CONFIG_SMP */ + +############################################################################### +# +# void mn10300_local_icache_inv(void) +# Invalidate the entire icache +# +############################################################################### + ALIGN + .globl mn10300_local_icache_inv + .type mn10300_local_icache_inv,@function +mn10300_local_icache_inv: + mov CHCTR,a0 + + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq mn10300_local_icache_inv_end + + invalidate_icache 1 + +mn10300_local_icache_inv_end: + ret [],0 + .size mn10300_local_icache_inv,.-mn10300_local_icache_inv + +############################################################################### +# +# void mn10300_local_dcache_inv(void) +# Invalidate the entire dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_inv + .type mn10300_local_dcache_inv,@function +mn10300_local_dcache_inv: + mov CHCTR,a0 + + movhu (a0),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_inv_end + + invalidate_dcache 1 + +mn10300_local_dcache_inv_end: + ret [],0 + .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv + +############################################################################### +# +# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size) +# void mn10300_local_dcache_inv_page(unsigned long start) +# Invalidate a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_inv_page + .globl mn10300_local_dcache_inv_range + .globl mn10300_local_dcache_inv_range2 + .type mn10300_local_dcache_inv_page,@function + .type mn10300_local_dcache_inv_range,@function + .type mn10300_local_dcache_inv_range2,@function +mn10300_local_dcache_inv_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_inv_range2: + add d0,d1 +mn10300_local_dcache_inv_range: + # If we are in writeback mode we check the start and end alignments, + # and if they're not cacheline-aligned, we must flush any bits outside + # the range that share cachelines with stuff inside the range +#ifdef CONFIG_MN10300_CACHE_WBACK + btst ~L1_CACHE_TAG_MASK,d0 + bne 1f + btst 
~L1_CACHE_TAG_MASK,d1 + beq 2f +1: + bra mn10300_local_dcache_flush_inv_range +2: +#endif /* CONFIG_MN10300_CACHE_WBACK */ + + movm [d2,d3,a2],(sp) + + mov CHCTR,a0 + movhu (a0),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_inv_range_end + + # round the addresses out to be full cachelines, unless we're in + # writeback mode, in which case we would be in flush and invalidate by + # now +#ifndef CONFIG_MN10300_CACHE_WBACK + and L1_CACHE_TAG_MASK,d0 # round start addr down + + mov L1_CACHE_BYTES-1,d2 + add d2,d1 + and L1_CACHE_TAG_MASK,d1 # round end addr up +#endif /* !CONFIG_MN10300_CACHE_WBACK */ + + sub d0,d1,d2 # calculate the total size + mov d0,a2 # A2 = start address + mov d1,a1 # A1 = end address + + LOCAL_CLI_SAVE(d3) + + mov DCPGCR,a0 # make sure the purger isn't busy + setlb + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + # skip initial address alignment calculation if address is zero + mov d2,d1 + cmp 0,a2 + beq 1f + +dcivloop: + /* calculate alignsize + * + * alignsize = L1_CACHE_BYTES; + * while (!(start & alignsize)) { + * alignsize <<= 1; + * } + * d1 = alignsize; + */ + mov L1_CACHE_BYTES,d1 + lsr 1,d1 + setlb + add d1,d1 + mov d1,d0 + and a2,d0 + leq + +1: + /* calculate invsize + * + * if (totalsize > alignsize) { + * invsize = alignsize; + * } else { + * invsize = totalsize; + * tmp = 0x80000000; + * while (!(invsize & tmp)) { + * tmp >>= 1; + * } + * invsize = tmp; + * } + * d1 = invsize; + */ + cmp d2,d1 + bns 2f + mov d2,d1 + + mov 0x80000000,d0 # start from bit 31 = 1 + setlb + lsr 1,d0 + mov d0,e0 + and d1,e0 + leq + mov d0,d1 + +2: + /* set mask + * + * mask = ~(invsize-1); + * DCPGMR = mask; + */ + mov d1,d0 + add -1,d0 + not d0 + mov d0,(DCPGMR) + + # invalidate area + mov a2,d0 + or DCPGCR_DCI,d0 + mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI + + setlb # wait for the purge to complete + mov (a0),d0 + btst DCPGCR_DCPGBSY,d0 + lne + + sub d1,d2 # decrease size remaining + add d1,a2 # increase next start address + + /* check invalidating of end address + * + * a2 = a2 + invsize + * if (a2 < end) { + * goto dcivloop; + * } */ + cmp a1,a2 + bns dcivloop + + LOCAL_IRQ_RESTORE(d3) + +mn10300_local_dcache_inv_range_end: + ret [d2,d3,a2],12 + .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page + .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range + .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2 + +############################################################################### +# +# void mn10300_local_icache_inv_page(unsigned long start) +# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size) +# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end) +# Invalidate a range of addresses on a page in the icache +# +############################################################################### + ALIGN + .globl mn10300_local_icache_inv_page + .globl mn10300_local_icache_inv_range + .globl mn10300_local_icache_inv_range2 + .type mn10300_local_icache_inv_page,@function + .type mn10300_local_icache_inv_range,@function + .type mn10300_local_icache_inv_range2,@function +mn10300_local_icache_inv_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_icache_inv_range2: + add d0,d1 +mn10300_local_icache_inv_range: + movm [d2,d3,a2],(sp) + + mov CHCTR,a0 + movhu (a0),d2 + btst CHCTR_ICEN,d2 + beq mn10300_local_icache_inv_range_reg_end + + /* calculate alignsize + * + * alignsize = L1_CACHE_BYTES; + * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) { + * 
alignsize <<= 1; + * } + * d2 = alignsize; + */ + mov L1_CACHE_BYTES,d2 + sub d0,d1,d3 + add -1,d3 + lsr L1_CACHE_SHIFT,d3 + beq 2f +1: + add d2,d2 + lsr 1,d3 + bne 1b +2: + + /* a1 = end */ + mov d1,a1 + + LOCAL_CLI_SAVE(d3) + + mov ICIVCR,a0 + /* wait for busy bit of area invalidation */ + setlb + mov (a0),d1 + btst ICIVCR_ICIVBSY,d1 + lne + + /* set mask + * + * mask = ~(alignsize-1); + * ICIVMR = mask; + */ + mov d2,d1 + add -1,d1 + not d1 + mov d1,(ICIVMR) + /* a2 = mask & start */ + and d1,d0,a2 + +icivloop: + /* area invalidate + * + * ICIVCR = (mask & start) | ICIVCR_ICI + */ + mov a2,d0 + or ICIVCR_ICI,d0 + mov d0,(a0) + + /* wait for busy bit of area invalidation */ + setlb + mov (a0),d1 + btst ICIVCR_ICIVBSY,d1 + lne + + /* check invalidating of end address + * + * a2 = a2 + alignsize + * if (a2 < end) { + * goto icivloop; + * } */ + add d2,a2 + cmp a1,a2 + bns icivloop + + LOCAL_IRQ_RESTORE(d3) + +mn10300_local_icache_inv_range_reg_end: + ret [d2,d3,a2],12 + .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page + .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range + .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2 diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S new file mode 100644 index 00000000000..ccedce9c144 --- /dev/null +++ b/arch/mn10300/mm/cache-inv-by-tag.S @@ -0,0 +1,276 @@ +/* MN10300 CPU core caching routines + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#include <linux/sys.h> +#include <linux/linkage.h> +#include <asm/smp.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/irqflags.h> +#include <asm/cacheflush.h> +#include "cache.inc" + +#define mn10300_local_dcache_inv_range_intr_interval \ + +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1) + +#if mn10300_local_dcache_inv_range_intr_interval > 0xff +#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less +#endif + + .am33_2 + + .globl mn10300_local_icache_inv_page + .globl mn10300_local_icache_inv_range + .globl mn10300_local_icache_inv_range2 + +mn10300_local_icache_inv_page = mn10300_local_icache_inv +mn10300_local_icache_inv_range = mn10300_local_icache_inv +mn10300_local_icache_inv_range2 = mn10300_local_icache_inv + +#ifndef CONFIG_SMP + .globl mn10300_icache_inv + .globl mn10300_icache_inv_page + .globl mn10300_icache_inv_range + .globl mn10300_icache_inv_range2 + .globl mn10300_dcache_inv + .globl mn10300_dcache_inv_page + .globl mn10300_dcache_inv_range + .globl mn10300_dcache_inv_range2 + +mn10300_icache_inv = mn10300_local_icache_inv +mn10300_icache_inv_page = mn10300_local_icache_inv_page +mn10300_icache_inv_range = mn10300_local_icache_inv_range +mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2 +mn10300_dcache_inv = mn10300_local_dcache_inv +mn10300_dcache_inv_page = mn10300_local_dcache_inv_page +mn10300_dcache_inv_range = mn10300_local_dcache_inv_range +mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2 + +#endif /* !CONFIG_SMP */ + +############################################################################### +# +# void mn10300_local_icache_inv(void) +# Invalidate the entire icache +# +############################################################################### + ALIGN + .globl mn10300_local_icache_inv + .type mn10300_local_icache_inv,@function +mn10300_local_icache_inv: + mov CHCTR,a0 + + movhu (a0),d0 + btst CHCTR_ICEN,d0 + beq mn10300_local_icache_inv_end + + invalidate_icache 1 + +mn10300_local_icache_inv_end: + ret [],0 + .size mn10300_local_icache_inv,.-mn10300_local_icache_inv + +############################################################################### +# +# void mn10300_local_dcache_inv(void) +# Invalidate the entire dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_inv + .type mn10300_local_dcache_inv,@function +mn10300_local_dcache_inv: + mov CHCTR,a0 + + movhu (a0),d0 + btst CHCTR_DCEN,d0 + beq mn10300_local_dcache_inv_end + + invalidate_dcache 1 + +mn10300_local_dcache_inv_end: + ret [],0 + .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv + +############################################################################### +# +# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end) +# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size) +# void mn10300_local_dcache_inv_page(unsigned long start) +# Invalidate a range of addresses on a page in the dcache +# +############################################################################### + ALIGN + .globl mn10300_local_dcache_inv_page + .globl mn10300_local_dcache_inv_range + .globl mn10300_local_dcache_inv_range2 + .type mn10300_local_dcache_inv_page,@function + .type mn10300_local_dcache_inv_range,@function + .type mn10300_local_dcache_inv_range2,@function +mn10300_local_dcache_inv_page: + and ~(PAGE_SIZE-1),d0 + mov PAGE_SIZE,d1 +mn10300_local_dcache_inv_range2: + add d0,d1 +mn10300_local_dcache_inv_range: + # 
If we are in writeback mode we check the start and end alignments, + # and if they're not cacheline-aligned, we must flush any bits outside + # the range that share cachelines with stuff inside the range +#ifdef CONFIG_MN10300_CACHE_WBACK + btst ~L1_CACHE_TAG_MASK,d0 + bne 1f + btst ~L1_CACHE_TAG_MASK,d1 + beq 2f +1: + bra mn10300_local_dcache_flush_inv_range +2: +#endif /* CONFIG_MN10300_CACHE_WBACK */ + + movm [d2,d3,a2],(sp) + + mov CHCTR,a2 + movhu (a2),d2 + btst CHCTR_DCEN,d2 + beq mn10300_local_dcache_inv_range_end + +#ifndef CONFIG_MN10300_CACHE_WBACK + and L1_CACHE_TAG_MASK,d0 # round start addr down + + add L1_CACHE_BYTES,d1 # round end addr up + and L1_CACHE_TAG_MASK,d1 +#endif /* !CONFIG_MN10300_CACHE_WBACK */ + mov d0,a1 + + clr d2 # we're going to clear tag RAM + # entries + + # read the tags from the tag RAM, and if they indicate a valid dirty + # cache line then invalidate that line + mov DCACHE_TAG(0,0),a0 + mov a1,d0 + and L1_CACHE_TAG_ENTRY,d0 + add d0,a0 # starting dcache tag RAM + # access address + + sub a1,d1 + lsr L1_CACHE_SHIFT,d1 # total number of entries to + # examine + + and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base + +mn10300_local_dcache_inv_range_outer_loop: + LOCAL_CLI_SAVE(d3) + + # disable the dcache + movhu (a2),d0 + and ~CHCTR_DCEN,d0 + movhu d0,(a2) + + # and wait for it to calm down + setlb + movhu (a2),d0 + btst CHCTR_DCBUSY,d0 + lne + +mn10300_local_dcache_inv_range_loop: + + # process the way 0 slot + mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot + btst L1_CACHE_TAG_VALID,d0 + beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline + # is not valid + + xor a1,d0 + lsr 12,d0 + bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline + + mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag + +mn10300_local_dcache_inv_range_skip_0: + + # process the way 1 slot + mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot + btst L1_CACHE_TAG_VALID,d0 + beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline + # is not valid + + xor a1,d0 + lsr 12,d0 + bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline + + mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag + +mn10300_local_dcache_inv_range_skip_1: + + # process the way 2 slot + mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot + btst L1_CACHE_TAG_VALID,d0 + beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline + # is not valid + + xor a1,d0 + lsr 12,d0 + bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline + + mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag + +mn10300_local_dcache_inv_range_skip_2: + + # process the way 3 slot + mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot + btst L1_CACHE_TAG_VALID,d0 + beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline + # is not valid + + xor a1,d0 + lsr 12,d0 + bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline + + mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag + +mn10300_local_dcache_inv_range_skip_3: + + # approx every N steps we re-enable the cache and see if there are any + # interrupts to be processed + # we also break out if we've reached the end of the loop + # (the bottom nibble of the count is zero in both cases) + add L1_CACHE_BYTES,a0 + add L1_CACHE_BYTES,a1 + and ~L1_CACHE_WAYDISP,a0 + add -1,d1 + btst mn10300_local_dcache_inv_range_intr_interval,d1 + bne mn10300_local_dcache_inv_range_loop + + # wait for the cache to finish what it's doing + setlb + movhu (a2),d0 + btst 
CHCTR_DCBUSY,d0 + lne + + # and reenable it + or CHCTR_DCEN,d0 + movhu d0,(a2) + movhu (a2),d0 + + # re-enable interrupts + # - we don't bother with delay NOPs as we'll have enough instructions + # before we disable interrupts again to give the interrupts a chance + # to happen + LOCAL_IRQ_RESTORE(d3) + + # go around again if the counter hasn't yet reached zero + add 0,d1 + bne mn10300_local_dcache_inv_range_outer_loop + +mn10300_local_dcache_inv_range_end: + ret [d2,d3,a2],12 + .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page + .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range + .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2 diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c new file mode 100644 index 00000000000..a6b63dde603 --- /dev/null +++ b/arch/mn10300/mm/cache-inv-icache.c @@ -0,0 +1,129 @@ +/* Invalidate icache when dcache doesn't need invalidation as it's in + * write-through mode + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/module.h> +#include <linux/mm.h> +#include <asm/cacheflush.h> +#include <asm/smp.h> +#include "cache-smp.h" + +/** + * flush_icache_page_range - Flush dcache and invalidate icache for part of a + * single page + * @start: The starting virtual address of the page part. + * @end: The ending virtual address of the page part. + * + * Invalidate the icache for part of a single page, as determined by the + * virtual addresses given. The page must be in the paged area. The dcache is + * not flushed as the cache must be in write-through mode to get here. + */ +static void flush_icache_page_range(unsigned long start, unsigned long end) +{ + unsigned long addr, size, off; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ppte, pte; + + /* work out how much of the page to flush */ + off = start & ~PAGE_MASK; + size = end - start; + + /* get the physical address the page is mapped to from the page + * tables */ + pgd = pgd_offset(current->mm, start); + if (!pgd || !pgd_val(*pgd)) + return; + + pud = pud_offset(pgd, start); + if (!pud || !pud_val(*pud)) + return; + + pmd = pmd_offset(pud, start); + if (!pmd || !pmd_val(*pmd)) + return; + + ppte = pte_offset_map(pmd, start); + if (!ppte) + return; + pte = *ppte; + pte_unmap(ppte); + + if (pte_none(pte)) + return; + + page = pte_page(pte); + if (!page) + return; + + addr = page_to_phys(page); + + /* invalidate the icache coverage on that region */ + mn10300_local_icache_inv_range2(addr + off, size); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); +} + +/** + * flush_icache_range - Globally flush dcache and invalidate icache for region + * @start: The starting virtual address of the region. + * @end: The ending virtual address of the region. + * + * This is used by the kernel to globally flush some code it has just written + * from the dcache back to RAM and then to globally invalidate the icache over + * that region so that that code can be run on all CPUs in the system. 
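+ *
+ * Illustrative sketch only (not part of this patch; "trampoline" and
+ * "len" are invented names): a caller that has just written a code
+ * sequence into memory would follow it with something like
+ *
+ *	memcpy(trampoline, template, len);
+ *	flush_icache_range((unsigned long) trampoline,
+ *			   (unsigned long) trampoline + len);
+ *
+ * after which the new code may be executed on any CPU.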
+ */ +void flush_icache_range(unsigned long start, unsigned long end) +{ + unsigned long start_page, end_page; + unsigned long flags; + + flags = smp_lock_cache(); + + if (end > 0x80000000UL) { + /* addresses above 0xa0000000 do not go through the cache */ + if (end > 0xa0000000UL) { + end = 0xa0000000UL; + if (start >= end) + goto done; + } + + /* kernel addresses between 0x80000000 and 0x9fffffff do not + * require page tables, so we just map such addresses + * directly */ + start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; + mn10300_icache_inv_range(start_page, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); + if (start_page == start) + goto done; + end = start_page; + } + + start_page = start & PAGE_MASK; + end_page = (end - 1) & PAGE_MASK; + + if (start_page == end_page) { + /* the first and last bytes are on the same page */ + flush_icache_page_range(start, end); + } else if (start_page + 1 == end_page) { + /* split over two virtually contiguous pages */ + flush_icache_page_range(start, end_page); + flush_icache_page_range(end_page, end); + } else { + /* more than 2 pages; just flush the entire cache */ + mn10300_local_icache_inv(); + smp_cache_call(SMP_ICACHE_INV, 0, 0); + } + +done: + smp_unlock_cache(flags); +} +EXPORT_SYMBOL(flush_icache_range); diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S deleted file mode 100644 index e839d0aedd6..00000000000 --- a/arch/mn10300/mm/cache-mn10300.S +++ /dev/null @@ -1,289 +0,0 @@ -/* MN10300 CPU core caching routines - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ -#include <linux/sys.h> -#include <linux/linkage.h> -#include <asm/smp.h> -#include <asm/page.h> -#include <asm/cache.h> - -#define mn10300_dcache_inv_range_intr_interval \ - +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1) - -#if mn10300_dcache_inv_range_intr_interval > 0xff -#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less -#endif - - .am33_2 - - .globl mn10300_icache_inv - .globl mn10300_dcache_inv - .globl mn10300_dcache_inv_range - .globl mn10300_dcache_inv_range2 - .globl mn10300_dcache_inv_page - -############################################################################### -# -# void mn10300_icache_inv(void) -# Invalidate the entire icache -# -############################################################################### - ALIGN -mn10300_icache_inv: - mov CHCTR,a0 - - movhu (a0),d0 - btst CHCTR_ICEN,d0 - beq mn10300_icache_inv_end - - mov epsw,d1 - and ~EPSW_IE,epsw - nop - nop - - # disable the icache - and ~CHCTR_ICEN,d0 - movhu d0,(a0) - - # and wait for it to calm down - setlb - movhu (a0),d0 - btst CHCTR_ICBUSY,d0 - lne - - # invalidate - or CHCTR_ICINV,d0 - movhu d0,(a0) - - # wait for the cache to finish - mov CHCTR,a0 - setlb - movhu (a0),d0 - btst CHCTR_ICBUSY,d0 - lne - - # and reenable it - and ~CHCTR_ICINV,d0 - or CHCTR_ICEN,d0 - movhu d0,(a0) - movhu (a0),d0 - - mov d1,epsw - -mn10300_icache_inv_end: - ret [],0 - -############################################################################### -# -# void mn10300_dcache_inv(void) -# Invalidate the entire dcache -# -############################################################################### - ALIGN -mn10300_dcache_inv: - mov CHCTR,a0 - - movhu (a0),d0 - btst CHCTR_DCEN,d0 - beq mn10300_dcache_inv_end - - mov epsw,d1 - and ~EPSW_IE,epsw - nop - nop - - # disable the dcache - and ~CHCTR_DCEN,d0 - movhu d0,(a0) - - # and wait for it to calm down - setlb - movhu (a0),d0 - btst CHCTR_DCBUSY,d0 - lne - - # invalidate - or CHCTR_DCINV,d0 - movhu d0,(a0) - - # wait for the cache to finish - mov CHCTR,a0 - setlb - movhu (a0),d0 - btst CHCTR_DCBUSY,d0 - lne - - # and reenable it - and ~CHCTR_DCINV,d0 - or CHCTR_DCEN,d0 - movhu d0,(a0) - movhu (a0),d0 - - mov d1,epsw - -mn10300_dcache_inv_end: - ret [],0 - -############################################################################### -# -# void mn10300_dcache_inv_range(unsigned start, unsigned end) -# void mn10300_dcache_inv_range2(unsigned start, unsigned size) -# void mn10300_dcache_inv_page(unsigned start) -# Invalidate a range of addresses on a page in the dcache -# -############################################################################### - ALIGN -mn10300_dcache_inv_page: - mov PAGE_SIZE,d1 -mn10300_dcache_inv_range2: - add d0,d1 -mn10300_dcache_inv_range: - movm [d2,d3,a2],(sp) - mov CHCTR,a2 - - movhu (a2),d2 - btst CHCTR_DCEN,d2 - beq mn10300_dcache_inv_range_end - - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start - # addr down - mov d0,a1 - - add L1_CACHE_BYTES,d1 # round end addr up - and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 - - clr d2 # we're going to clear tag ram - # entries - - # read the tags from the tag RAM, and if they indicate a valid dirty - # cache line then invalidate that line - mov DCACHE_TAG(0,0),a0 - mov a1,d0 - and L1_CACHE_TAG_ENTRY,d0 - add d0,a0 # starting dcache tag RAM - # access address - - sub a1,d1 - lsr L1_CACHE_SHIFT,d1 # total number of entries to - # examine - - and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base - -mn10300_dcache_inv_range_outer_loop: - # disable 
interrupts - mov epsw,d3 - and ~EPSW_IE,epsw - nop # note that reading CHCTR and - # AND'ing D0 occupy two delay - # slots after disabling - # interrupts - - # disable the dcache - movhu (a2),d0 - and ~CHCTR_DCEN,d0 - movhu d0,(a2) - - # and wait for it to calm down - setlb - movhu (a2),d0 - btst CHCTR_DCBUSY,d0 - lne - -mn10300_dcache_inv_range_loop: - - # process the way 0 slot - mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot - btst L1_CACHE_TAG_VALID,d0 - beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not - # valid - - xor a1,d0 - lsr 12,d0 - bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline - - mov d2,(a0) # kill the tag - -mn10300_dcache_inv_range_skip_0: - - # process the way 1 slot - mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot - btst L1_CACHE_TAG_VALID,d0 - beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not - # valid - - xor a1,d0 - lsr 12,d0 - bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline - - mov d2,(a0) # kill the tag - -mn10300_dcache_inv_range_skip_1: - - # process the way 2 slot - mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot - btst L1_CACHE_TAG_VALID,d0 - beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not - # valid - - xor a1,d0 - lsr 12,d0 - bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline - - mov d2,(a0) # kill the tag - -mn10300_dcache_inv_range_skip_2: - - # process the way 3 slot - mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot - btst L1_CACHE_TAG_VALID,d0 - beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not - # valid - - xor a1,d0 - lsr 12,d0 - bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline - - mov d2,(a0) # kill the tag - -mn10300_dcache_inv_range_skip_3: - - # approx every N steps we re-enable the cache and see if there are any - # interrupts to be processed - # we also break out if we've reached the end of the loop - # (the bottom nibble of the count is zero in both cases) - add L1_CACHE_BYTES,a0 - add L1_CACHE_BYTES,a1 - add -1,d1 - btst mn10300_dcache_inv_range_intr_interval,d1 - bne mn10300_dcache_inv_range_loop - - # wait for the cache to finish what it's doing - setlb - movhu (a2),d0 - btst CHCTR_DCBUSY,d0 - lne - - # and reenable it - or CHCTR_DCEN,d0 - movhu d0,(a2) - movhu (a2),d0 - - # re-enable interrupts - # - we don't bother with delay NOPs as we'll have enough instructions - # before we disable interrupts again to give the interrupts a chance - # to happen - mov d3,epsw - - # go around again if the counter hasn't yet reached zero - add 0,d1 - bne mn10300_dcache_inv_range_outer_loop - -mn10300_dcache_inv_range_end: - ret [d2,d3,a2],12 diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c new file mode 100644 index 00000000000..fd51af5eaf7 --- /dev/null +++ b/arch/mn10300/mm/cache-smp-flush.c @@ -0,0 +1,156 @@ +/* Functions for global dcache flush when writeback caching in SMP + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/mm.h> +#include <asm/cacheflush.h> +#include "cache-smp.h" + +/** + * mn10300_dcache_flush - Globally flush data cache + * + * Flush the data cache on all CPUs. 
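+ *
+ * Illustrative sketch only (start_device_dma() is an invented driver
+ * helper, not part of this patch): a driver that cannot bound the dirty
+ * region might flush the whole dcache before letting a device read from
+ * RAM:
+ *
+ *	mn10300_dcache_flush();
+ *	start_device_dma(buf, len);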
+ */ +void mn10300_dcache_flush(void) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush(); + smp_cache_call(SMP_DCACHE_FLUSH, 0, 0); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_page - Globally flush a page of data cache + * @start: The address of the page of memory to be flushed. + * + * Flush a range of addresses in the data cache on all CPUs covering + * the page that includes the given address. + */ +void mn10300_dcache_flush_page(unsigned long start) +{ + unsigned long flags; + + start &= ~(PAGE_SIZE-1); + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_page(start); + smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_range - Globally flush range of data cache + * @start: The start address of the region to be flushed. + * @end: The end address of the region to be flushed. + * + * Flush a range of addresses in the data cache on all CPUs, between start and + * end-1 inclusive. + */ +void mn10300_dcache_flush_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_range(start, end); + smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_range2 - Globally flush range of data cache + * @start: The start address of the region to be flushed. + * @size: The size of the region to be flushed. + * + * Flush a range of addresses in the data cache on all CPUs, between start and + * start+size-1 inclusive. + */ +void mn10300_dcache_flush_range2(unsigned long start, unsigned long size) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_range2(start, size); + smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_inv - Globally flush and invalidate data cache + * + * Flush and invalidate the data cache on all CPUs. + */ +void mn10300_dcache_flush_inv(void) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_inv(); + smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data + * cache + * @start: The address of the page of memory to be flushed and invalidated. + * + * Flush and invalidate a range of addresses in the data cache on all CPUs + * covering the page that includes the given address. + */ +void mn10300_dcache_flush_inv_page(unsigned long start) +{ + unsigned long flags; + + start &= ~(PAGE_SIZE-1); + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_inv_page(start); + smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data + * cache + * @start: The start address of the region to be flushed and invalidated. + * @end: The end address of the region to be flushed and invalidated. + * + * Flush and invalidate a range of addresses in the data cache on all CPUs, + * between start and end-1 inclusive. 
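+ *
+ * Illustrative sketch only ("buf" and "len" are invented names): before a
+ * device DMAs into a buffer, evict any cached copies so that stale lines
+ * can neither be written back over the new data nor read in its place:
+ *
+ *	mn10300_dcache_flush_inv_range((unsigned long) buf,
+ *				       (unsigned long) buf + len);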
+ */ +void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_inv_range(start, end); + smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data + * cache + * @start: The start address of the region to be flushed and invalidated. + * @size: The size of the region to be flushed and invalidated. + * + * Flush and invalidate a range of addresses in the data cache on all CPUs, + * between start and start+size-1 inclusive. + */ +void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_flush_inv_range2(start, size); + smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size); + smp_unlock_cache(flags); +} diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c new file mode 100644 index 00000000000..ff1787358c8 --- /dev/null +++ b/arch/mn10300/mm/cache-smp-inv.c @@ -0,0 +1,153 @@ +/* Functions for global i/dcache invalidation when caching in SMP + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/mm.h> +#include <asm/cacheflush.h> +#include "cache-smp.h" + +/** + * mn10300_icache_inv - Globally invalidate instruction cache + * + * Invalidate the instruction cache on all CPUs. + */ +void mn10300_icache_inv(void) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_icache_inv(); + smp_cache_call(SMP_ICACHE_INV, 0, 0); + smp_unlock_cache(flags); +} + +/** + * mn10300_icache_inv_page - Globally invalidate a page of instruction cache + * @start: The address of the page of memory to be invalidated. + * + * Invalidate a range of addresses in the instruction cache on all CPUs + * covering the page that includes the given address. + */ +void mn10300_icache_inv_page(unsigned long start) +{ + unsigned long flags; + + start &= ~(PAGE_SIZE-1); + + flags = smp_lock_cache(); + mn10300_local_icache_inv_page(start); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE); + smp_unlock_cache(flags); +} + +/** + * mn10300_icache_inv_range - Globally invalidate range of instruction cache + * @start: The start address of the region to be invalidated. + * @end: The end address of the region to be invalidated. + * + * Invalidate a range of addresses in the instruction cache on all CPUs, + * between start and end-1 inclusive. + */ +void mn10300_icache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_icache_inv_range(start, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); + smp_unlock_cache(flags); +} + +/** + * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache + * @start: The start address of the region to be invalidated. + * @size: The size of the region to be invalidated. + * + * Invalidate a range of addresses in the instruction cache on all CPUs, + * between start and start+size-1 inclusive. 
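+ *
+ * This is the (start, size) form of mn10300_icache_inv_range(), so the
+ * two calls below are equivalent:
+ *
+ *	mn10300_icache_inv_range2(addr, PAGE_SIZE);
+ *	mn10300_icache_inv_range(addr, addr + PAGE_SIZE);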
+ */ +void mn10300_icache_inv_range2(unsigned long start, unsigned long size) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_icache_inv_range2(start, size); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_inv - Globally invalidate data cache + * + * Invalidate the data cache on all CPUs. + */ +void mn10300_dcache_inv(void) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_inv(); + smp_cache_call(SMP_DCACHE_INV, 0, 0); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_inv_page - Globally invalidate a page of data cache + * @start: The address of the page of memory to be invalidated. + * + * Invalidate a range of addresses in the data cache on all CPUs covering the + * page that includes the given address. + */ +void mn10300_dcache_inv_page(unsigned long start) +{ + unsigned long flags; + + start &= ~(PAGE_SIZE-1); + + flags = smp_lock_cache(); + mn10300_local_dcache_inv_page(start); + smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_inv_range - Globally invalidate range of data cache + * @start: The start address of the region to be invalidated. + * @end: The end address of the region to be invalidated. + * + * Invalidate a range of addresses in the data cache on all CPUs, between start + * and end-1 inclusive. + */ +void mn10300_dcache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_inv_range(start, end); + smp_cache_call(SMP_DCACHE_INV_RANGE, start, end); + smp_unlock_cache(flags); +} + +/** + * mn10300_dcache_inv_range2 - Globally invalidate range of data cache + * @start: The start address of the region to be invalidated. + * @size: The size of the region to be invalidated. + * + * Invalidate a range of addresses in the data cache on all CPUs, between start + * and start+size-1 inclusive. + */ +void mn10300_dcache_inv_range2(unsigned long start, unsigned long size) +{ + unsigned long flags; + + flags = smp_lock_cache(); + mn10300_local_dcache_inv_range2(start, size); + smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size); + smp_unlock_cache(flags); +} diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c new file mode 100644 index 00000000000..2d23b9eeee6 --- /dev/null +++ b/arch/mn10300/mm/cache-smp.c @@ -0,0 +1,105 @@ +/* SMP global caching code + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/threads.h> +#include <linux/interrupt.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> +#include <asm/cacheflush.h> +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/smp.h> +#include "cache-smp.h" + +DEFINE_SPINLOCK(smp_cache_lock); +static unsigned long smp_cache_mask; +static unsigned long smp_cache_start; +static unsigned long smp_cache_end; +static cpumask_t smp_cache_ipi_map; /* Bitmask of cache IPI done CPUs */ + +/** + * smp_cache_interrupt - Handle IPI request to flush caches. 
+ * + * Handle a request delivered by IPI to flush the current CPU's + * caches. The parameters are stored in smp_cache_*. + */ +void smp_cache_interrupt(void) +{ + unsigned long opr_mask = smp_cache_mask; + + switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) { + case SMP_DCACHE_NOP: + break; + case SMP_DCACHE_INV: + mn10300_local_dcache_inv(); + break; + case SMP_DCACHE_INV_RANGE: + mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end); + break; + case SMP_DCACHE_FLUSH: + mn10300_local_dcache_flush(); + break; + case SMP_DCACHE_FLUSH_RANGE: + mn10300_local_dcache_flush_range(smp_cache_start, + smp_cache_end); + break; + case SMP_DCACHE_FLUSH_INV: + mn10300_local_dcache_flush_inv(); + break; + case SMP_DCACHE_FLUSH_INV_RANGE: + mn10300_local_dcache_flush_inv_range(smp_cache_start, + smp_cache_end); + break; + } + + switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) { + case SMP_ICACHE_NOP: + break; + case SMP_ICACHE_INV: + mn10300_local_icache_inv(); + break; + case SMP_ICACHE_INV_RANGE: + mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end); + break; + } + + cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map); +} + +/** + * smp_cache_call - Issue an IPI to request the other CPUs flush caches + * @opr_mask: Cache operation flags + * @start: Start address of request + * @end: End address of request + * + * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt() + * above on those other CPUs and then waits for them to finish. + * + * The caller must hold smp_cache_lock. + */ +void smp_cache_call(unsigned long opr_mask, + unsigned long start, unsigned long end) +{ + smp_cache_mask = opr_mask; + smp_cache_start = start; + smp_cache_end = end; + cpumask_copy(&smp_cache_ipi_map, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map); + + send_IPI_allbutself(FLUSH_CACHE_IPI); + + while (!cpumask_empty(&smp_cache_ipi_map)) + /* nothing. lockup detection does not belong here */ + mb(); +} diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h new file mode 100644 index 00000000000..cb52892aa66 --- /dev/null +++ b/arch/mn10300/mm/cache-smp.h @@ -0,0 +1,69 @@ +/* SMP caching definitions + * + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + + +/* + * Operation requests for smp_cache_call(). + * + * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together. 
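+ *
+ * For example, the combined request
+ *
+ *	smp_cache_call(SMP_ICACHE_INV | SMP_DCACHE_FLUSH, 0, 0);
+ *
+ * asks the other CPUs to flush their dcaches and invalidate their icaches
+ * with a single IPI (this pairing is also provided below as
+ * SMP_IDCACHE_INV_FLUSH).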
+ */ +enum smp_icache_ops { + SMP_ICACHE_NOP = 0x0000, + SMP_ICACHE_INV = 0x0001, + SMP_ICACHE_INV_RANGE = 0x0002, +}; +#define SMP_ICACHE_OP_MASK 0x0003 + +enum smp_dcache_ops { + SMP_DCACHE_NOP = 0x0000, + SMP_DCACHE_INV = 0x0004, + SMP_DCACHE_INV_RANGE = 0x0008, + SMP_DCACHE_FLUSH = 0x000c, + SMP_DCACHE_FLUSH_RANGE = 0x0010, + SMP_DCACHE_FLUSH_INV = 0x0014, + SMP_DCACHE_FLUSH_INV_RANGE = 0x0018, +}; +#define SMP_DCACHE_OP_MASK 0x001c + +#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH) +#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE) + +/* + * cache-smp.c + */ +#ifdef CONFIG_SMP +extern spinlock_t smp_cache_lock; + +extern void smp_cache_call(unsigned long opr_mask, + unsigned long addr, unsigned long end); + +static inline unsigned long smp_lock_cache(void) + __acquires(&smp_cache_lock) +{ + unsigned long flags; + spin_lock_irqsave(&smp_cache_lock, flags); + return flags; +} + +static inline void smp_unlock_cache(unsigned long flags) + __releases(&smp_cache_lock) +{ + spin_unlock_irqrestore(&smp_cache_lock, flags); +} + +#else +static inline unsigned long smp_lock_cache(void) { return 0; } +static inline void smp_unlock_cache(unsigned long flags) {} +static inline void smp_cache_call(unsigned long opr_mask, + unsigned long addr, unsigned long end) +{ +} +#endif /* CONFIG_SMP */ diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c index 1b76719ec1c..0a1f0aa92eb 100644 --- a/arch/mn10300/mm/cache.c +++ b/arch/mn10300/mm/cache.c @@ -18,8 +18,13 @@ #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/uaccess.h> +#include <asm/smp.h> +#include "cache-smp.h" EXPORT_SYMBOL(mn10300_icache_inv); +EXPORT_SYMBOL(mn10300_icache_inv_range); +EXPORT_SYMBOL(mn10300_icache_inv_range2); +EXPORT_SYMBOL(mn10300_icache_inv_page); EXPORT_SYMBOL(mn10300_dcache_inv); EXPORT_SYMBOL(mn10300_dcache_inv_range); EXPORT_SYMBOL(mn10300_dcache_inv_range2); @@ -37,78 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page); #endif /* - * write a page back from the dcache and invalidate the icache so that we can - * run code from it that we've just written into it - */ -void flush_icache_page(struct vm_area_struct *vma, struct page *page) -{ - mn10300_dcache_flush_page(page_to_phys(page)); - mn10300_icache_inv(); -} -EXPORT_SYMBOL(flush_icache_page); - -/* - * write some code we've just written back from the dcache and invalidate the - * icache so that we can run that code - */ -void flush_icache_range(unsigned long start, unsigned long end) -{ -#ifdef CONFIG_MN10300_CACHE_WBACK - unsigned long addr, size, off; - struct page *page; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ppte, pte; - - for (; start < end; start += size) { - /* work out how much of the page to flush */ - off = start & (PAGE_SIZE - 1); - - size = end - start; - if (size > PAGE_SIZE - off) - size = PAGE_SIZE - off; - - /* get the physical address the page is mapped to from the page - * tables */ - pgd = pgd_offset(current->mm, start); - if (!pgd || !pgd_val(*pgd)) - continue; - - pud = pud_offset(pgd, start); - if (!pud || !pud_val(*pud)) - continue; - - pmd = pmd_offset(pud, start); - if (!pmd || !pmd_val(*pmd)) - continue; - - ppte = pte_offset_map(pmd, start); - if (!ppte) - continue; - pte = *ppte; - pte_unmap(ppte); - - if (pte_none(pte)) - continue; - - page = pte_page(pte); - if (!page) - continue; - - addr = page_to_phys(page); - - /* flush the dcache and invalidate the icache coverage on that - * region */ - mn10300_dcache_flush_range2(addr + off, size); - } -#endif - - 
mn10300_icache_inv(); -} -EXPORT_SYMBOL(flush_icache_range); - -/* * allow userspace to flush the instruction cache */ asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) diff --git a/arch/mn10300/mm/cache.inc b/arch/mn10300/mm/cache.inc new file mode 100644 index 00000000000..394a119b9c7 --- /dev/null +++ b/arch/mn10300/mm/cache.inc @@ -0,0 +1,133 @@ +/* MN10300 CPU core caching macros -*- asm -*- + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + + +############################################################################### +# +# Invalidate the instruction cache. +# A0: Should hold CHCTR +# D0: Should have been read from CHCTR +# D1: Will be clobbered +# +# On some cores it is necessary to disable the icache whilst we do this. +# +############################################################################### + .macro invalidate_icache,disable_irq + +#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3) + .if \disable_irq + # don't want an interrupt routine seeing a disabled cache + mov epsw,d1 + and ~EPSW_IE,epsw + or EPSW_NMID,epsw + nop + nop + .endif + + # disable the icache + and ~CHCTR_ICEN,d0 + movhu d0,(a0) + + # and wait for it to calm down + setlb + movhu (a0),d0 + btst CHCTR_ICBUSY,d0 + lne + + # invalidate + or CHCTR_ICINV,d0 + movhu d0,(a0) + + # wait for the cache to finish + setlb + movhu (a0),d0 + btst CHCTR_ICBUSY,d0 + lne + + # and reenable it + or CHCTR_ICEN,d0 + movhu d0,(a0) + movhu (a0),d0 + + .if \disable_irq + LOCAL_IRQ_RESTORE(d1) + .endif + +#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */ + + # invalidate + or CHCTR_ICINV,d0 + movhu d0,(a0) + movhu (a0),d0 + +#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */ + .endm + +############################################################################### +# +# Invalidate the data cache. +# A0: Should hold CHCTR +# D0: Should have been read from CHCTR +# D1: Will be clobbered +# +# On some cores it is necessary to disable the dcache whilst we do this. 
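+#
+# A typical call site, mirroring mn10300_local_dcache_inv in the files
+# above, looks like this sketch:
+#
+#	mov	CHCTR,a0		# A0 = address of CHCTR
+#	movhu	(a0),d0			# D0 = CHCTR contents
+#	invalidate_dcache 1		# 1 = disable interrupts meanwhile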
+# +############################################################################### + .macro invalidate_dcache,disable_irq + +#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3) + .if \disable_irq + # don't want an interrupt routine seeing a disabled cache + mov epsw,d1 + and ~EPSW_IE,epsw + or EPSW_NMID,epsw + nop + nop + .endif + + # disable the dcache + and ~CHCTR_DCEN,d0 + movhu d0,(a0) + + # and wait for it to calm down + setlb + movhu (a0),d0 + btst CHCTR_DCBUSY,d0 + lne + + # invalidate + or CHCTR_DCINV,d0 + movhu d0,(a0) + + # wait for the cache to finish + setlb + movhu (a0),d0 + btst CHCTR_DCBUSY,d0 + lne + + # and reenable it + or CHCTR_DCEN,d0 + movhu d0,(a0) + movhu (a0),d0 + + .if \disable_irq + LOCAL_IRQ_RESTORE(d1) + .endif + +#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */ + + # invalidate + or CHCTR_DCINV,d0 + movhu d0,(a0) + movhu (a0),d0 + +#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */ + .endm diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index f3649d8f50e..e244ebe637e 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c @@ -14,14 +14,29 @@ #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> +#include <linux/gfp.h> +#include <linux/export.h> #include <asm/io.h> +static unsigned long pci_sram_allocated = 0xbc000000; + void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp) { unsigned long addr; void *ret; + pr_debug("dma_alloc_coherent(%s,%zu,%x)\n", + dev ? dev_name(dev) : "?", size, gfp); + + if (0xbe000000 - pci_sram_allocated >= size) { + size = (size + 255) & ~255; + addr = pci_sram_allocated; + pci_sram_allocated += size; + ret = (void *) addr; + goto done; + } + /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); @@ -41,7 +56,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, /* write back and evict all cache lines covering this region */ mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE); +done: *dma_handle = virt_to_bus((void *) addr); + printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle); return ret; } EXPORT_SYMBOL(dma_alloc_coherent); @@ -51,6 +68,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, { unsigned long addr = (unsigned long) vaddr & ~0x20000000; + if (addr >= 0x9c000000) + return; + free_pages(addr, get_order(size)); } EXPORT_SYMBOL(dma_free_coherent); diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 33cf25025da..3516cbdf1ee 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -20,17 +20,16 @@ #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/vt_kern.h> /* For unblank_screen() */ -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/hardirq.h> -#include <asm/gdb-stub.h> #include <asm/cpu-regs.h> +#include <asm/debugger.h> +#include <asm/gdb-stub.h> /* * Unlock any spinlocks which will prevent us from getting the @@ -40,10 +39,6 @@ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; -#ifdef CONFIG_SMP - /* Many serial drivers do __global_cli() */ - global_irq_lock = 0; -#endif } else { int loglevel_save = console_loglevel; #ifdef CONFIG_VT @@ -101,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address) } #endif -asmlinkage void monitor_signal(struct pt_regs *); - /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -130,7 +123,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, struct mm_struct *mm; unsigned long page; siginfo_t info; - int write, fault; + int fault; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; #ifdef CONFIG_GDBSTUB /* handle GDB stub causing a fault */ @@ -177,6 +171,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, if (in_atomic() || !mm) goto no_context; + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) + flags |= FAULT_FLAG_USER; +retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); @@ -227,7 +224,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, */ good_area: info.si_code = SEGV_ACCERR; - write = 0; switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) { default: /* 3: write, present */ case MMUFCR_xFC_TYPE_WRITE: @@ -239,7 +235,7 @@ good_area: case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; - write++; + flags |= FAULT_FLAG_WRITE; break; /* read from protected page */ @@ -258,7 +254,11 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -266,10 +266,22 @@ good_area: goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } up_read(&mm->mmap_sem); return; @@ -280,7 +292,6 @@ good_area: */ bad_area: up_read(&mm->mmap_sem); - monitor_signal(regs); /* User mode accesses just cause a SIGSEGV */ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { @@ -293,7 +304,6 @@ bad_area: } no_context: - monitor_signal(regs); /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; @@ -315,10 +325,8 @@ no_context: printk(" printing pc:\n"); printk(KERN_ALERT "%08lx\n", regs->pc); -#ifdef CONFIG_GDBSTUB - gdbstub_intercept( - regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR); -#endif + debugger_intercept(fault_code & 0x00010000 ? 
EXCEP_IAERROR : EXCEP_DAERROR, + SIGSEGV, SEGV_ACCERR, regs); page = PTBR; page = ((unsigned long *) __va(page))[address >> 22]; @@ -339,15 +347,14 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - monitor_signal(regs); - printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); - if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) - do_exit(SIGKILL); + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { + pagefault_out_of_memory(); + return; + } goto no_context; do_sigbus: up_read(&mm->mmap_sem); - monitor_signal(regs); /* * Send a sigbus, regardless of whether we were in kernel diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index 8cee387a24f..97a1ec0beee 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -17,7 +17,6 @@ #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> -#include <linux/slab.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> @@ -27,9 +26,9 @@ #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/bootmem.h> +#include <linux/gfp.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -37,10 +36,12 @@ #include <asm/tlb.h> #include <asm/sections.h> -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - unsigned long highstart_pfn, highend_pfn; +#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT +static struct vm_struct user_iomap_vm; +#endif + /* * set up paging */ @@ -73,7 +74,24 @@ void __init paging_init(void) /* pass the memory from the bootmem allocator to the main allocator */ free_area_init(zones_size); - __flush_tlb_all(); +#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT + /* The Atomic Operation Unit registers need to be mapped to userspace + * for all processes. The following uses vm_area_register_early() to + * reserve the first page of the vmalloc area and sets the pte for that + * page. + * + * glibc hardcodes this virtual mapping, so we're pretty much stuck with + * it from now on. 
+ */ + user_iomap_vm.flags = VM_USERMAP; + user_iomap_vm.size = 1 << PAGE_SHIFT; + vm_area_register_early(&user_iomap_vm, PAGE_SIZE); + ppte = kernel_vmalloc_ptes; + set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT, + PAGE_USERIO)); +#endif + + local_flush_tlb_all(); } /* @@ -81,62 +99,21 @@ void __init paging_init(void) */ void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int tmp; - - if (!mem_map) - BUG(); + BUG_ON(!mem_map); #define START_PFN (contig_page_data.bdata->node_min_pfn) #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) - max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; + max_mapnr = MAX_LOW_PFN - START_PFN; high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - reservedpages = 0; - for (tmp = 0; tmp < num_physpages; tmp++) - if (PageReserved(&mem_map[tmp])) - reservedpages++; - - codesize = (unsigned long) &_etext - (unsigned long) &_stext; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO - "Memory: %luk/%luk available" - " (%dk kernel code, %dk reserved, %dk data, %dk init," - " %ldk highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), - max_mapnr << (PAGE_SHIFT - 10), - codesize >> 10, - reservedpages << (PAGE_SHIFT - 10), - datasize >> 10, - initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10)) - ); -} + free_all_bootmem(); -/* - * - */ -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *) addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); + mem_init_print_info(NULL); } /* @@ -144,9 +121,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) */ void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long) &__init_begin, - (unsigned long) &__init_end); + free_initmem_default(POISON_FREE_INITMEM); } /* @@ -155,6 +130,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 94c4a435806..b9920b1edd5 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -17,17 +17,15 @@ #include <linux/timer.h> #include <linux/mm.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/smp.h> #include <asm/pgalloc.h> #include <asm/cpu-regs.h> @@ -450,8 +448,7 @@ found_opcode: regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]); tmp = format_tbl[pop->format].opsz; - if (tmp > noc) - BUG(); /* match was less complete than it ought to have been */ + BUG_ON(tmp > noc); /* match was less complete than it 
ought to have been */ if (tmp < noc) { tmp = noc - tmp; @@ -634,13 +631,13 @@ static int misalignment_addr(unsigned long *registers, unsigned long sp, goto displace_or_inc; case SD24: tmp = disp << 8; - asm("asr 8,%0" : "=r"(tmp) : "0"(tmp)); + asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc"); disp = (long) tmp; goto displace_or_inc; case SIMM4_2: tmp = opcode >> 4 & 0x0f; tmp <<= 28; - asm("asr 28,%0" : "=r"(tmp) : "0"(tmp)); + asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc"); disp = (long) tmp; goto displace_or_inc; case IMM8: diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c index 31c9d27a75a..a4f7d3dcc6e 100644 --- a/arch/mn10300/mm/mmu-context.c +++ b/arch/mn10300/mm/mmu-context.c @@ -13,47 +13,23 @@ #include <asm/mmu_context.h> #include <asm/tlbflush.h> +#ifdef CONFIG_MN10300_TLB_USE_PIDR /* * list of the MMU contexts last allocated on each CPU */ unsigned long mmu_context_cache[NR_CPUS] = { - [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1, + [0 ... NR_CPUS - 1] = + MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR), }; - -/* - * flush the specified TLB entry - */ -void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) -{ - unsigned long pteu, cnx, flags; - - addr &= PAGE_MASK; - - /* make sure the context doesn't migrate and defend against - * interference from vmalloc'd regions */ - local_irq_save(flags); - - cnx = mm_context(vma->vm_mm); - - if (cnx != MMU_NO_CONTEXT) { - pteu = addr | (cnx & 0x000000ffUL); - IPTEU = pteu; - DPTEU = pteu; - if (IPTEL & xPTEL_V) - IPTEL = 0; - if (DPTEL & xPTEL_V) - DPTEL = 0; - } - - local_irq_restore(flags); -} +#endif /* CONFIG_MN10300_TLB_USE_PIDR */ /* * preemptively set a TLB entry */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pteu, ptel, cnx, flags; + pte_t pte = *ptep; addr &= PAGE_MASK; ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2); @@ -62,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) * interference from vmalloc'd regions */ local_irq_save(flags); + cnx = ~MMU_NO_CONTEXT; +#ifdef CONFIG_MN10300_TLB_USE_PIDR cnx = mm_context(vma->vm_mm); +#endif if (cnx != MMU_NO_CONTEXT) { - pteu = addr | (cnx & 0x000000ffUL); + pteu = addr; +#ifdef CONFIG_MN10300_TLB_USE_PIDR + pteu |= cnx & MMU_CONTEXT_TLBPID_MASK; +#endif if (!(pte_val(pte) & _PAGE_NX)) { IPTEU = pteu; if (IPTEL & xPTEL_V) diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c index baffc581e03..e77a7c72808 100644 --- a/arch/mn10300/mm/pgtable.c +++ b/arch/mn10300/mm/pgtable.c @@ -12,16 +12,15 @@ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/highmem.h> -#include <linux/slab.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/quicklist.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> @@ -59,7 +58,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) * It's enough to flush this one mapping. 
* (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + local_flush_tlb_one(vaddr); } pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) @@ -79,8 +78,13 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) #else pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); #endif - if (pte) - clear_highpage(pte); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } @@ -96,7 +100,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) * checks at dup_mmap(), exec(), and other mmlist addition points * could be used. The locking scheme was chosen on the basis of * manfred's recommendations and having no core impact whatsoever. - * -- wli + * -- nyc */ DEFINE_SPINLOCK(pgd_lock); struct page *pgd_list; diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S index 7095147dcb8..b9940177d81 100644 --- a/arch/mn10300/mm/tlb-mn10300.S +++ b/arch/mn10300/mm/tlb-mn10300.S @@ -27,7 +27,6 @@ ############################################################################### .type itlb_miss,@function ENTRY(itlb_miss) - and ~EPSW_NMID,epsw #ifdef CONFIG_GDBSTUB movm [d2,d3,a2],(sp) #else @@ -38,6 +37,12 @@ ENTRY(itlb_miss) nop #endif +#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR) + mov (MMUCTR),d2 + mov d2,(MMUCTR) +#endif + + and ~EPSW_NMID,epsw mov (IPTEU),d3 mov (PTBR),a2 mov d3,d2 @@ -56,10 +61,16 @@ ENTRY(itlb_miss) btst _PAGE_VALID,d2 beq itlb_miss_fault # jump if doesn't point to a page # (might be a swap id) +#if ((_PAGE_ACCESSED & 0xffffff00) == 0) bset _PAGE_ACCESSED,(0,a2) - and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2 +#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0) + bset +(_PAGE_ACCESSED >> 8),(1,a2) +#else +#error "_PAGE_ACCESSED value is out of range" +#endif + and ~xPTEL2_UNUSED1,d2 itlb_miss_set: - mov d2,(IPTEL) # change the TLB + mov d2,(IPTEL2) # change the TLB #ifdef CONFIG_GDBSTUB movm (sp),[d2,d3,a2] #endif @@ -79,7 +90,6 @@ itlb_miss_fault: ############################################################################### .type dtlb_miss,@function ENTRY(dtlb_miss) - and ~EPSW_NMID,epsw #ifdef CONFIG_GDBSTUB movm [d2,d3,a2],(sp) #else @@ -90,6 +100,12 @@ ENTRY(dtlb_miss) nop #endif +#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR) + mov (MMUCTR),d2 + mov d2,(MMUCTR) +#endif + + and ~EPSW_NMID,epsw mov (DPTEU),d3 mov (PTBR),a2 mov d3,d2 @@ -108,10 +124,16 @@ ENTRY(dtlb_miss) btst _PAGE_VALID,d2 beq dtlb_miss_fault # jump if doesn't point to a page # (might be a swap id) +#if ((_PAGE_ACCESSED & 0xffffff00) == 0) bset _PAGE_ACCESSED,(0,a2) - and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2 +#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0) + bset +(_PAGE_ACCESSED >> 8),(1,a2) +#else +#error "_PAGE_ACCESSED value is out of range" +#endif + and ~xPTEL2_UNUSED1,d2 dtlb_miss_set: - mov d2,(DPTEL) # change the TLB + mov d2,(DPTEL2) # change the TLB #ifdef CONFIG_GDBSTUB movm (sp),[d2,d3,a2] #endif @@ -130,9 +152,15 @@ dtlb_miss_fault: ############################################################################### .type itlb_aerror,@function ENTRY(itlb_aerror) - and ~EPSW_NMID,epsw add -4,sp SAVE_ALL + +#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR) + mov (MMUCTR),d1 + mov d1,(MMUCTR) +#endif + + and ~EPSW_NMID,epsw add -4,sp # need to pass three params # calculate the fault code @@ -140,15 +168,13 @@ ENTRY(itlb_aerror) or 0x00010000,d1 # it's an instruction fetch # determine the page address - mov (IPTEU),a2 - mov a2,d0 + mov (IPTEU),d0 and PAGE_MASK,d0 mov 
 
 	clr	d0
-	mov	d0,(IPTEL)
+	mov	d0,(IPTEL2)
 
-	and	~EPSW_NMID,epsw
 	or	EPSW_IE,epsw
 	mov	fp,d0
 	call	do_page_fault[],0	# do_page_fault(regs, code, addr)
@@ -163,10 +189,16 @@
 ###############################################################################
 	.type	dtlb_aerror,@function
ENTRY(dtlb_aerror)
-	and	~EPSW_NMID,epsw
 	add	-4,sp
 	SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d1
+	mov	d1,(MMUCTR)
+#endif
+
 	add	-4,sp			# need to pass three params
+	and	~EPSW_NMID,epsw
 
 	# calculate the fault code
 	movhu	(MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
 	mov	d0,(12,sp)
 
 	clr	d0
-	mov	d0,(DPTEL)
+	mov	d0,(DPTEL2)
 
-	and	~EPSW_NMID,epsw
 	or	EPSW_IE,epsw
 	mov	fp,d0
 	call	do_page_fault[],0	# do_page_fault(regs, code, addr)
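A note on the #if/#elif ladder added to both TLB-miss handlers above: bset here takes an 8-bit immediate and operates on a single byte of the PTE at a given displacement, so the handler must pick whichever byte of the PTE actually contains the accessed bit and shift the mask down to fit. A minimal C rendering of the same compile-time selection, for illustration only (the helper name and the example bit value are invented; the real _PAGE_ACCESSED is arch-specific):

#include <stdint.h>

#define _PAGE_ACCESSED 0x00000020UL	/* example value only */

static void set_accessed_bit(uint8_t *pte)
{
	if ((_PAGE_ACCESSED & 0xffffff00UL) == 0)
		pte[0] |= _PAGE_ACCESSED;		/* bset _PAGE_ACCESSED,(0,a2) */
	else if ((_PAGE_ACCESSED & 0xffff00ffUL) == 0)
		pte[1] |= _PAGE_ACCESSED >> 8;		/* bset +(_PAGE_ACCESSED >> 8),(1,a2) */
	/* any other bit position fails the build, matching the #error branch */
}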
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 00000000000..e5d0ef722bf
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,213 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL	0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+	&init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+			     unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+	unsigned long cpu_id;
+
+	cpu_id = get_cpu();
+
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
+		/* This was a BUG() but until someone can quote me the line
+		 * from the intel manual that guarantees an IPI to multiple
+		 * CPUs is retried _only_ on the erroring CPUs it's staying
+		 * as a return
+		 *
+		 * BUG();
+		 */
+		goto out;
+
+	if (flush_va == FLUSH_ALL)
+		local_flush_tlb();
+	else
+		local_flush_tlb_page(flush_mm, flush_va);
+
+	smp_mb__before_atomic();
+	cpumask_clear_cpu(cpu_id, &flush_cpumask);
+	smp_mb__after_atomic();
+out:
+	put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+			     unsigned long va)
+{
+	cpumask_t tmp;
+
+	/* A couple of sanity checks (to be removed):
+	 * - mask must not be empty
+	 * - current CPU must not be in mask
+	 * - we do not send IPIs to as-yet unbooted CPUs.
+	 */
+	BUG_ON(!mm);
+	BUG_ON(cpumask_empty(&cpumask));
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
+
+	cpumask_and(&tmp, &cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(&cpumask, &tmp));
+
+	/* I'm not happy about this global shared spinlock in the MM hot path,
+	 * but we'll see how contended it is.
+	 *
+	 * Temporarily this turns IRQs off, so that lockups are detected by
+	 * the NMI watchdog.
+	 */
+	spin_lock(&tlbstate_lock);
+
+	flush_mm = mm;
+	flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+	smp_call_function(smp_flush_tlb, NULL, 1);
+
+	while (!cpumask_empty(&flush_cpumask))
+		/* Lockup detection does not belong here */
+		smp_mb();
+
+	flush_mm = NULL;
+	flush_va = 0;
+	spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+
+	local_flush_tlb();
+	if (!cpumask_empty(&cpu_mask))
+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+	preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+	struct mm_struct *mm = current->mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+
+	local_flush_tlb();
+	if (!cpumask_empty(&cpu_mask))
+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+	preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+
+	local_flush_tlb_page(mm, va);
+	if (!cpumask_empty(&cpu_mask))
+		flush_tlb_others(cpu_mask, mm, va);
+
+	preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+	local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+	on_each_cpu(do_flush_tlb_all, 0, 1);
+}
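The new tlb-smp.c implements a classic IPI-driven TLB shootdown: the initiating CPU publishes (flush_mm, flush_va) under tlbstate_lock, marks the target CPUs in flush_cpumask, kicks them via smp_call_function(), and then busy-waits until every target has flushed locally and cleared its own bit. For readers who want the handshake in isolation, here is a minimal, self-contained userspace model of that protocol, with pthreads and C11 atomics standing in for CPUs and IPIs; every name here (shoot_*, cpu_thread, NCPUS) is invented for the sketch and is not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS		4
#define FLUSH_ALL	0xffffffffUL

static pthread_mutex_t shoot_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays tlbstate_lock */
static atomic_ulong shoot_mask;	/* plays flush_cpumask: bit n set => CPU n must flush */
static unsigned long shoot_va;	/* plays flush_va */

/* Target side: what smp_flush_tlb() does when the "IPI" arrives. */
static void *cpu_thread(void *arg)
{
	long cpu = (long)arg;

	/* Spin until the initiator names this CPU in the mask. */
	while (!(atomic_load(&shoot_mask) & (1UL << cpu)))
		;
	printf("cpu%ld: flushing %s\n", cpu,
	       shoot_va == FLUSH_ALL ? "entire TLB" : "one page");
	/* Clearing our bit is the acknowledgement the initiator spins on. */
	atomic_fetch_and(&shoot_mask, ~(1UL << cpu));
	return NULL;
}

/* Initiator side: the core of flush_tlb_others(). */
static void shoot_others(unsigned long mask, unsigned long va)
{
	pthread_mutex_lock(&shoot_lock);	/* one shootdown in flight at a time */
	shoot_va = va;				/* publish what to flush... */
	atomic_fetch_or(&shoot_mask, mask);	/* ...then whom to interrupt */
	while (atomic_load(&shoot_mask))	/* wait for every acknowledgement */
		;
	pthread_mutex_unlock(&shoot_lock);
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);
	shoot_others((1UL << NCPUS) - 1, FLUSH_ALL);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The model makes the cost visible: the initiator holds a global lock and burns cycles until the slowest target acknowledges, which is exactly why the in-kernel comment frets about "this global shared spinlock in the MM hot path".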
