author    Linus Torvalds <torvalds@linux-foundation.org>   2010-10-27 18:53:26 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>   2010-10-27 18:53:26 -0700
commit    bdab225015fbbb45ccd8913f5d7c01b2bf67d8b2 (patch)
tree      5ef62301face958977a084bf2b6c5300296a25f2 /arch/mn10300/include
parent    7c5814c7199851c5fe9395d08fc1ab3c8c1531ea (diff)
parent    7c7fcf762e405eb040ee10d22d656a791f616122 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
MN10300: Save frame pointer in thread_info struct rather than global var
MN10300: Change "Matsushita" to "Panasonic".
MN10300: Create a defconfig for the ASB2364 board
MN10300: Update the ASB2303 defconfig
MN10300: ASB2364: Add support for SMSC911X and SMC911X
MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
MN10300: Generic time support
MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
MN10300: Map userspace atomic op regs as a vmalloc page
      MN10300: Add Panasonic AM34 subarch and implement SMP
MN10300: Delete idle_timestamp from irq_cpustat_t
MN10300: Make various interrupt priority settings configurable
MN10300: Optimise do_csum()
MN10300: Implement atomic ops using atomic ops unit
MN10300: Make the FPU operate in non-lazy mode under SMP
MN10300: SMP TLB flushing
MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
MN10300: Make the use of PIDR to mark TLB entries controllable
MN10300: Rename __flush_tlb*() to local_flush_tlb*()
MN10300: AM34 erratum requires MMUCTR read and write on exception entry
...
Diffstat (limited to 'arch/mn10300/include')
38 files changed, 1792 insertions, 524 deletions
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72..92d2f9298e3 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       mov     %5,(_ADR,%3)    \n"
+                "       mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+                : "memory", "cc");
+
+        return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       cmp     %5,%1           \n"
+                "       bne     2f              \n"
+                "       mov     %6,(_ADR,%3)    \n"
+                "2:     mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                  "r"(old), "r"(new)
+                : "memory", "cc");
+
+        return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        *m = val;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        if (oldval == old)
+                *m = new;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                            \
+        ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                     (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                      \
+        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                        (unsigned long)(o),     \
+                                        (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i)  { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v)  ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        int retval;
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       add     %5,%1           \n"
+                "       mov     %1,(_ADR,%3)    \n"
+                "       mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+                : "memory", "cc");
+
+#else
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        retval = v->counter;
+        retval += i;
+        v->counter = retval;
+        arch_local_irq_restore(flags);
+#endif
+        return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        int retval;
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       sub     %5,%1           \n"
+                "       mov     %1,(_ADR,%3)    \n"
+                "       mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+                : "memory", "cc");
+
+#else
+        unsigned long flags;
+        flags = arch_local_cli_save();
+        retval = v->counter;
+        retval -= i;
+        v->counter = retval;
+        arch_local_irq_restore(flags);
+#endif
+        return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+        return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+        atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+        atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+        atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)            atomic_sub_return(1, (v))
+#define atomic_inc_return(v)            atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+
+#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1:     mov     %3,(_AAR,%2)    \n"
+                "       mov     (_ADR,%2),%0    \n"
+                "       and     %4,%0           \n"
+                "       mov     %0,(_ADR,%2)    \n"
+                "       mov     (_ADR,%2),%0    \n"     /* flush */
+                "       mov     (_ASR,%2),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=m"(*addr)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+                : "memory", "cc");
+#else
+        unsigned long flags;
+
+        mask = ~mask;
+        flags = arch_local_cli_save();
+        *addr &= mask;
+        arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+        int status;
+
+        asm volatile(
+                "1:     mov     %3,(_AAR,%2)    \n"
+                "       mov     (_ADR,%2),%0    \n"
+                "       or      %4,%0           \n"
+                "       mov     %0,(_ADR,%2)    \n"
+                "       mov     (_ADR,%2),%0    \n"     /* flush */
+                "       mov     (_ASR,%2),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=m"(*addr)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+                : "memory", "cc");
+#else
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        *addr |= mask;
+        arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec()     barrier()
+#define smp_mb__after_atomic_dec()      barrier()
+#define smp_mb__before_atomic_inc()     barrier()
+#define smp_mb__after_atomic_inc()      barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e966107..3b8a868188f 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
         unsigned int *a = (unsigned int *) addr;
         int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
-        return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
  * change bit
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
         int mask;
         unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
         *a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
  * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
  * test and change bit
  */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
         int mask, retval;
         unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
         return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
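
[Editor's note: the atomic_add_unless() macro in the asm/atomic.h hunk above is the classic compare-and-swap retry loop: snapshot the counter, give up if it already holds the excluded value, otherwise try to publish the incremented value and re-read on interference. Below is a minimal host-side sketch of the same pattern using portable C11 atomics instead of the MN10300 atomic-ops unit; the add_unless/refcount names are ours, for illustration only.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add @a to @*v unless @*v holds @u; mirrors atomic_add_unless() above. */
static bool add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c != u) {
                /* Try to swing c -> c + a; on failure c is reloaded
                 * with the current value and the loop re-checks it. */
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        }
        return false;   /* counter already held the excluded value */
}

int main(void)
{
        atomic_int refcount = 1;

        printf("%d\n", add_unless(&refcount, 1, 0));    /* 1: ref taken */
        atomic_store(&refcount, 0);
        printf("%d\n", add_unless(&refcount, 1, 0));    /* 0: stays dead */
        return 0;
}

[atomic_inc_not_zero(v) is exactly add_unless(v, 1, 0), which is why the header defines it that way.]
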
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366..f29cde2cfc9 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -43,14 +43,18 @@
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
-        __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+        __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+                 (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY) \
-        __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+        __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+                 (ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
-        __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+        __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+                 (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY) \
-        __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+        __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+                 (ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f03..faed90240de 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
  */
-#define flush_cache_all()                       do {} while (0)
-#define flush_cache_mm(mm)                      do {} while (0)
-#define flush_cache_dup_mm(mm)                  do {} while (0)
-#define flush_cache_range(mm, start, end)       do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)      do {} while (0)
-#define flush_cache_vmap(start, end)            do {} while (0)
-#define flush_cache_vunmap(start, end)          do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                 do {} while (0)
-#define flush_dcache_mmap_lock(mapping)         do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)       do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)          do {} while (0)
-#define flush_icache_page(vma, pg)              do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-        flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-        do {                                    \
-                memcpy(dst, src, len);          \
-                flush_icache_page(vma, page);   \
-        } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-        memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush()                    do {} while (0)
+#define mn10300_local_dcache_flush_page(start)          do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)    do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size)  do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+        mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+        mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+        mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+        mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush()                          do {} while (0)
 #define mn10300_dcache_flush_page(start)                do {} while (0)
 #define mn10300_dcache_flush_range(start, end)          do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
         mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv()                      do {} while (0)
+#define mn10300_local_icache_inv_page(start)            do {} while (0)
+#define mn10300_local_icache_inv_range(start, end)      do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size)    do {} while (0)
+#define mn10300_local_dcache_inv()                      do {} while (0)
+#define mn10300_local_dcache_inv_page(start)            do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end)      do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size)    do {} while (0)
+#define mn10300_local_dcache_flush()                    do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start)      do {} while (0)
+#define mn10300_local_dcache_flush_inv()                do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start)          do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)    do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size)  do {} while (0)
 #define mn10300_icache_inv()                            do {} while (0)
+#define mn10300_icache_inv_page(start)                  do {} while (0)
+#define mn10300_icache_inv_range(start, end)            do {} while (0)
+#define mn10300_icache_inv_range2(start, size)          do {} while (0)
 #define mn10300_dcache_inv()                            do {} while (0)
 #define mn10300_dcache_inv_page(start)                  do {} while (0)
 #define mn10300_dcache_inv_range(start, end)            do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start)                do {} while (0)
 #define mn10300_dcache_flush_range(start, end)          do {} while (0)
 #define mn10300_dcache_flush_range2(start, size)        do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                       do {} while (0)
+#define flush_cache_mm(mm)                      do {} while (0)
+#define flush_cache_dup_mm(mm)                  do {} while (0)
+#define flush_cache_range(mm, start, end)       do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do {} while (0)
+#define flush_cache_vmap(start, end)            do {} while (0)
+#define flush_cache_vunmap(start, end)          do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                 do {} while (0)
+#define flush_dcache_mmap_lock(mapping)         do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)       do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                     struct page *page)
+{
+        mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end)          do {} while (0)
+#define flush_icache_page(vma, pg)              do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+        flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+        do {                                    \
+                memcpy(dst, src, len);          \
+                flush_icache_page(vma, page);   \
+        } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+        memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
  */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
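
[Editor's note: copy_to_user_page() in the cacheflush.h hunk above encodes the general rule for pages that may be executed: write the new bytes (they land in the dcache), then invalidate the matching icache lines before anything can fetch stale instructions. A hedged host-side sketch of the same discipline, using GCC's __builtin___clear_cache as a portable stand-in for flush_icache_page()/flush_icache_range(); the patch_code helper is hypothetical.]

#include <string.h>

/* Copy new instruction bytes into place, then make the icache coherent.
 * On MN10300 this is memcpy() + flush_icache_page(), as in the
 * copy_to_user_page() definition above. */
static void patch_code(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);          /* new bytes land in the dcache */
        /* sync the icache over the written range before executing it */
        __builtin___clear_cache((char *)dst, (char *)dst + len);
}
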
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388e..90ed4a365c9 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #endif
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * instructions */
@@ -24,7 +23,6 @@
 asm(" .am33_2\n");
 #else
 .am33_2
 #endif
-#endif
 
 #ifdef __KERNEL__
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR                0x00040000      /* register bank control */
 #define EPSW_ML                 0x00080000      /* monitor level */
 #define EPSW_FE                 0x00100000      /* FPU enable */
+#define EPSW_IM_SHIFT           8               /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num)        ((num) << EPSW_IM_SHIFT)
 
 /* FPU registers */
 #define FPCR_EF_I               0x00000001      /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV                  __SYSREGC(0xc0000050, u32)      /* CPU revision register */
 #define CPUREV_TYPE             0x0000000f      /* CPU type */
 #define CPUREV_TYPE_S           0
-#define CPUREV_TYPE_AM33V1      0x00000000      /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2      0x00000001      /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1      0x00000002      /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1      0x00000000      /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2      0x00000001      /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1      0x00000002      /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3      0x00000003      /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2      0x00000004      /* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION         0x000000f0      /* CPU revision */
 #define CPUREV_REVISION_S       4
 #define CPUREV_ICWAY            0x00000f00      /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD             0x0f00          /* instruction cache way mode */
 #define CHCTR_DCWMD             0xf000          /* data cache way mode */
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR                  __SYSREG(0xc0000c00, u32)       /* icache area invalidate control */
+#define ICIVCR_ICIVBSY          0x00000008      /* icache area invalidate busy */
+#define ICIVCR_ICI              0x00000001      /* icache area invalidate */
+
+#define ICIVMR                  __SYSREG(0xc0000c04, u32)       /* icache area invalidate mask */
+
+#define DCPGCR                  __SYSREG(0xc0000c10, u32)       /* data cache area purge control */
+#define DCPGCR_DCPGBSY          0x00000008      /* data cache area purge busy */
+#define DCPGCR_DCP              0x00000002      /* data cache area purge */
+#define DCPGCR_DCI              0x00000001      /* data cache area invalidate */
+
+#define DCPGMR                  __SYSREG(0xc0000c14, u32)       /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 #define MMUCTR                  __SYSREG(0xc0000090, u32)       /* MMU control register */
 #define MMUCTR_IRP              0x0000003f      /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
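
[Editor's note: the CPUREV bitfields in the cpu-regs.h hunk above decode with the usual mask-and-shift idiom. A small self-contained sketch follows; the decode_cpurev helper, the CPUREV_ICWAY_S shift value, and the sample register value are ours for illustration — only the masks shown in the diff are taken from the header.]

#include <stdint.h>
#include <stdio.h>

/* Field masks/shifts mirrored from cpu-regs.h above. */
#define CPUREV_TYPE        0x0000000f  /* CPU type */
#define CPUREV_TYPE_S      0
#define CPUREV_REVISION    0x000000f0  /* CPU revision */
#define CPUREV_REVISION_S  4
#define CPUREV_ICWAY       0x00000f00  /* number of icache ways */
#define CPUREV_ICWAY_S     8           /* shift assumed from the mask */

static void decode_cpurev(uint32_t cpurev)
{
        printf("type=%u rev=%u icache-ways=%u\n",
               (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S,
               (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
               (cpurev & CPUREV_ICWAY) >> CPUREV_ICWAY_S);
}

int main(void)
{
        decode_cpurev(0x00000412);  /* hypothetical: AM34-1, rev 1, 4 ways */
        return 0;
}
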