From a0ab36689a36e583b6e736f1c99ac8c9aebdad59 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 13 Jan 2010 18:31:48 +0900
Subject: sh: fixed PMB mode refactoring.

This introduces some much overdue chainsawing of the fixed PMB support.
Fixed PMB was introduced initially to work around the fact that dynamic
PMB mode was relatively broken, though the two were never intended to
converge.

The main areas where the two differ are whether the system is booted in
29-bit mode or 32-bit mode, and whether legacy mappings are to be
preserved. Any system booting in true 32-bit mode will not care about
legacy mappings, so these are roughly decoupled.

Regardless of the entry point, PMB and 32BIT are directly related as far
as the kernel is concerned, so we also switch back to having one select
the other. With legacy mappings iterated through and applied in the
initialization path it's now possible to finally merge the two
implementations and permit dynamic remapping on top of the remaining
entries, regardless of whether the boot mappings are crafted by hand or
inherited from the boot loader.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index c7426ad9926..4b0882bf518 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -65,11 +65,29 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
+#ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
 	       unsigned long size, unsigned long flags);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
+#else
+static inline long pmb_remap(unsigned long virt, unsigned long phys,
+			     unsigned long size, unsigned long flags)
+{
+	return -EINVAL
+}
+
+static inline void pmb_unmap(unsigned long addr)
+{
+}
+
+static inline int pmb_init(void)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_PMB */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
-- 
cgit v1.2.3-18-g5258
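The point of the stubs is that shared code no longer needs its own
#ifdef CONFIG_PMB blocks. A rough caller sketch (hypothetical, not from
the tree; note the stub above still carries the missing-semicolon bug
that the next patch fixes):

/*
 * Hypothetical caller sketch -- not from the tree. With the stubs in
 * place, code shared between PMB and non-PMB configurations can call
 * the PMB API unconditionally and fall back when it is compiled out.
 */
#include <asm/mmu.h>

static void __iomem *map_window(unsigned long virt, unsigned long phys,
				unsigned long size, unsigned long flags)
{
	if (pmb_remap(virt, phys, size, flags) >= 0)
		return (void __iomem *)virt;	/* PMB covers the window */

	return NULL;	/* no PMB: caller falls back to ioremap() */
}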
From 46c4e5daea3d5df06e27bf5a49a0c42274db6725 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Fri, 15 Jan 2010 08:00:45 +0900
Subject: sh: Fix CONFIG_PMB=n build.

The last commit introduced the following breakage:

arch/sh/include/asm/mmu.h: In function 'pmb_remap':
arch/sh/include/asm/mmu.h:79: error: expected ';' before '}' token

and ...

arch/sh/include/asm/mmu.h:78: error: 'EINVAL' undeclared (first use in this function)
arch/sh/include/asm/mmu.h:78: error: (Each undeclared identifier is reported only once
arch/sh/include/asm/mmu.h:78: error: for each function it appears in.)
arch/sh/include/asm/mmu.h: In function 'pmb_init':
arch/sh/include/asm/mmu.h:87: error: 'ENODEV' undeclared (first use in this function)

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 4b0882bf518..e5e8f48830e 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -30,6 +30,7 @@
 #define PMB_NO_ENTRY		(-1)
 
 #ifndef __ASSEMBLY__
+#include <linux/errno.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -75,7 +76,7 @@ int pmb_init(void);
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
 			     unsigned long size, unsigned long flags)
 {
-	return -EINVAL
+	return -EINVAL;
 }
 
 static inline void pmb_unmap(unsigned long addr)
-- 
cgit v1.2.3-18-g5258

From 2efa53b269ec1e9289a108e1506f53f6f1de440b Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 20 Jan 2010 16:40:48 +0900
Subject: sh: Make 29/32-bit mode check helper generally available.

Presently __in_29bit_mode() is only defined for the PMB case, but it's
also easily derived from the CONFIG_29BIT and CONFIG_32BIT && CONFIG_PMB=n
cases.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index e5e8f48830e..ca7d91e8aa7 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -31,6 +31,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
+#include <linux/threads.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -72,6 +73,7 @@ long pmb_remap(unsigned long virt, unsigned long phys,
 	       unsigned long size, unsigned long flags);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
+bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
 			     unsigned long size, unsigned long flags)
@@ -87,8 +89,14 @@ static inline int pmb_init(void)
 {
 	return -ENODEV;
 }
-#endif /* CONFIG_PMB */
 
+#ifdef CONFIG_29BIT
+#define __in_29bit_mode()	(1)
+#else
+#define __in_29bit_mode()	(0)
+#endif
+
+#endif /* CONFIG_PMB */
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
-- 
cgit v1.2.3-18-g5258

From efd54ea315f645ef318708aab5714a5f1f432d03 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 16 Feb 2010 18:39:30 +0900
Subject: sh: Merge the legacy PMB mapping and entry synchronization code.

This merges the code for iterating over the legacy PMB mappings and the
code for synchronizing software state with the hardware mappings. There's
really no reason to do the same iteration twice, and this also buys us
the legacy entry logging facility for the dynamic PMB case.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index ca7d91e8aa7..2fcbedb5500 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -25,6 +25,7 @@
 #define PMB_C		0x00000008
 #define PMB_WT		0x00000001
 #define PMB_UB		0x00000200
+#define PMB_CACHE_MASK	(PMB_C | PMB_WT | PMB_UB)
 #define PMB_V		0x00000100
 
 #define PMB_NO_ENTRY	(-1)
-- 
cgit v1.2.3-18-g5258
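PMB_CACHE_MASK gathers the cacheability bits (PMB_C, PMB_WT, PMB_UB)
into a single mask, which is what lets synchronization code compare
cache attributes while ignoring unrelated flag bits. A minimal
illustration (the helper name is made up, not from the tree):

#include <asm/mmu.h>

/* Do two PMB flag words agree on their cache attributes? */
static inline int pmb_cache_attrs_match(unsigned long a, unsigned long b)
{
	return (a & PMB_CACHE_MASK) == (b & PMB_CACHE_MASK);
}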
Both the store queue API and the PMB remapping take an unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32 bits, but we make the
conversion anyway to be safe. The store queue remapping, on the other
hand, depends on the extended prot bits for enabling userspace access to
the mappings.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb5500..151bc922701 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@ struct pmb_entry {
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }
-- 
cgit v1.2.3-18-g5258

From 51becfd96287b3913b13075699433730984e2f4f Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 17 Feb 2010 15:33:30 +0900
Subject: sh: PMB tidying.

Some overdue cleanup of the PMB code, killing off unused functionality
and duplication sprinkled about the tree.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 151bc922701..44c90434141 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -11,7 +11,9 @@
 #define PMB_ADDR		0xf6100000
 #define PMB_DATA		0xf7100000
-#define PMB_ENTRY_MAX		16
+
+#define NR_PMB_ENTRIES		16
+
 #define PMB_E_MASK		0x0000000f
 #define PMB_E_SHIFT		8
-- 
cgit v1.2.3-18-g5258

From d7813bc9e8e384f5a293b05c095c799d41af3668 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 17 Feb 2010 17:56:38 +0900
Subject: sh: Build PMB entry links for existing contiguous multi-page mappings.

This plugs in entry sizing support for existing mappings and then builds
on top of that for linking together entries that are mapping contiguous
areas. This will ultimately permit us to coalesce mappings and promote
head pages while reclaiming PMB slots for dynamic remapping.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 44c90434141..5453169bf05 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -59,6 +59,7 @@ struct pmb_entry {
 	unsigned long vpn;
 	unsigned long ppn;
 	unsigned long flags;
+	unsigned long size;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
 	 * PMB_NO_ENTRY to search for a free one
 	 */
 	int entry;
 
-	struct pmb_entry *next;
 	/* Adjacent entry link for contiguous multi-entry mappings */
 	struct pmb_entry *link;
 };
-- 
cgit v1.2.3-18-g5258
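With per-entry sizes recorded and ->link chaining adjacent entries, the
total span of a compound mapping can be computed by walking the chain
from its head entry. A small sketch (the helper is illustrative, not
from the tree):

#include <asm/mmu.h>

/* Sum the span of a contiguous multi-entry mapping from its head. */
static unsigned long pmb_mapping_size(struct pmb_entry *head)
{
	struct pmb_entry *pmbe;
	unsigned long size = 0;

	for (pmbe = head; pmbe; pmbe = pmbe->link)
		size += pmbe->size;

	return size;
}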
This implements some locking for the PMB code. A high-level rwlock is
added for dealing with read/write accesses on the entry map, while a
per-entry spinlock is added to deal with the PMB entry changing out from
underneath us.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 5453169bf05..e42c4e2a41d 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -53,24 +53,6 @@ typedef struct {
 #endif
 } mm_context_t;
 
-struct pmb_entry;
-
-struct pmb_entry {
-	unsigned long vpn;
-	unsigned long ppn;
-	unsigned long flags;
-	unsigned long size;
-
-	/*
-	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
-	 * PMB_NO_ENTRY to search for a free one
-	 */
-	int entry;
-
-	/* Adjacent entry link for contiguous multi-entry mappings */
-	struct pmb_entry *link;
-};
-
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-- 
cgit v1.2.3-18-g5258
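A sketch of the scheme described above, assuming pmb.c-private
definitions along these lines (struct pmb_entry now lives in
arch/sh/mm/pmb.c; the field and lock names here are assumptions for
illustration, not the file's actual contents):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(pmb_rwlock);	/* guards the entry map as a whole */

struct pmb_entry {
	unsigned long vpn, ppn, flags, size;
	int entry;
	struct pmb_entry *link;
	spinlock_t lock;		/* guards this entry's state */
};

/* Readers of the map take the rwlock shared, then pin one entry: */
static void pmb_entry_set_flags(struct pmb_entry *pmbe, unsigned long flags)
{
	unsigned long irqflags;

	read_lock(&pmb_rwlock);
	spin_lock_irqsave(&pmbe->lock, irqflags);
	pmbe->flags = flags;		/* ...then rewrite PMB_DATA to match */
	spin_unlock_irqrestore(&pmbe->lock, irqflags);
	read_unlock(&pmb_rwlock);
}

Writers reshaping the map (allocating or reclaiming slots) would take
pmb_rwlock exclusively instead.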
From d01447b3197c2c470a14666be2c640407bbbfec7 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 18 Feb 2010 18:13:51 +0900
Subject: sh: Merge legacy and dynamic PMB modes.

This implements a bit of rework for the PMB code, which permits us to
kill off the legacy PMB mode completely. Rather than trusting the boot
loader to do the right thing, we do a quick verification of the PMB
contents to determine whether to have the kernel set up the initial
mappings or whether it needs to mangle them later on instead.

If we're booting from legacy mappings, the kernel will now take control
of them and make them match the kernel's initial mapping configuration.
This is accomplished by breaking the initialization phase out into
multiple steps: synchronization, merging, and resizing.

With the recent rework, the synchronization code establishes page links
for compound mappings already, so we build on top of this for promoting
mappings and reclaiming unused slots.

At the same time, the changes introduced for the uncached helpers also
permit us to dynamically resize the uncached mapping without any
particular headaches. The smallest page size is more than sufficient for
mapping all of kernel text, and as we're careful not to jump to any
far-off locations in the setup code the mapping can safely be resized
regardless of whether we are executing from it or not.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index e42c4e2a41d..15a05b615ba 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -58,7 +58,7 @@ typedef struct {
 long pmb_remap(unsigned long virt, unsigned long phys,
 	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
-int pmb_init(void);
+void pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
@@ -67,14 +67,8 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
 	return -EINVAL;
 }
 
-static inline void pmb_unmap(unsigned long addr)
-{
-}
-
-static inline int pmb_init(void)
-{
-	return -ENODEV;
-}
+#define pmb_unmap(addr)		do { } while (0)
+#define pmb_init(addr)		do { } while (0)
 
 #ifdef CONFIG_29BIT
 #define __in_29bit_mode()	(1)
-- 
cgit v1.2.3-18-g5258

From 90e7d649d86f21d478dc134f74c88e19dd472393 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 23 Feb 2010 16:20:53 +0900
Subject: sh: reworked dynamic PMB mapping.

This implements a fairly significant overhaul of the dynamic PMB mapping
code. The primary change here is that the PMB gets its own VMA that
follows the uncached mapping, and we attempt to be a bit more intelligent
with dynamic sizing, multi-entry mapping, and so forth.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 15a05b615ba..19fe84550b4 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -55,19 +55,29 @@ typedef struct {
 
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
-long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, pgprot_t prot);
-void pmb_unmap(unsigned long addr);
-void pmb_init(void);
 bool __in_29bit_mode(void);
+
+void pmb_init(void);
+int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
+		     unsigned long size, pgprot_t prot);
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+			       pgprot_t prot, void *caller);
+int pmb_unmap(void __iomem *addr);
+
 #else
-static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, pgprot_t prot)
+
+static inline void __iomem *
+pmb_remap_caller(phys_addr_t phys, unsigned long size,
+		 pgprot_t prot, void *caller)
+{
+	return NULL;
+}
+
+static inline int pmb_unmap(void __iomem *addr)
 {
 	return -EINVAL;
 }
 
-#define pmb_unmap(addr)		do { } while (0)
 #define pmb_init(addr)		do { } while (0)
 
 #ifdef CONFIG_29BIT
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
 #endif
 
 #endif /* CONFIG_PMB */
+
+static inline void __iomem *
+pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
+{
+	return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
-- 
cgit v1.2.3-18-g5258
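The interface is now ioremap-like: pmb_remap() returns a void __iomem
pointer (or NULL) and records its call site via
__builtin_return_address(0), so mappings can be attributed to their
owners. A hypothetical driver-side usage sketch (names and the pgprot
chosen are assumptions, not from the tree):

#include <linux/errno.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>	/* PAGE_KERNEL_NOCACHE, assumed here */

static void __iomem *regs;

static int example_attach(phys_addr_t phys, unsigned long size)
{
	regs = pmb_remap(phys, size, PAGE_KERNEL_NOCACHE);
	if (!regs)
		return -ENOMEM;		/* PMB couldn't cover the request */
	return 0;
}

static void example_detach(void)
{
	pmb_unmap(regs);		/* 0 on success, -EINVAL without PMB */
	regs = NULL;
}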
From 089b43f9737f2e51c6ce354749f5a9f3f093601c Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 10 Mar 2010 16:29:48 +0900
Subject: sh: Fix up NUMA build for 29-bit.

pmb_bolt_mapping() is undefined on 29-bit builds, so provide a stub.
This fixes up the NUMA build on platforms lacking PMB support.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/mmu.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 19fe84550b4..56e4418c19b 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -66,6 +66,13 @@ int pmb_unmap(void __iomem *addr);
 
 #else
 
+static inline int
+pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
+		 unsigned long size, pgprot_t prot)
+{
+	return -EINVAL;
+}
+
 static inline void __iomem *
 pmb_remap_caller(phys_addr_t phys, unsigned long size,
 		 pgprot_t prot, void *caller)
-- 
cgit v1.2.3-18-g5258
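With this stub in place, boot code can request bolted mappings
unconditionally and simply proceed when PMB support is absent. A rough
sketch (the function name and addresses are invented for illustration):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>	/* PAGE_KERNEL, assumed here */

static void __init example_bolt_window(void)
{
	/* Illustrative addresses only: a 64MB window. */
	if (pmb_bolt_mapping(0xa8000000UL, 0x48000000ULL, 64 << 20,
			     PAGE_KERNEL) < 0)
		pr_info("PMB unavailable; relying on existing translation\n");
}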