author     Paul Mundt <lethal@linux-sh.org>  2010-05-11 13:52:50 +0900
committer  Paul Mundt <lethal@linux-sh.org>  2010-05-11 13:52:50 +0900
commit     21823259a70b7a2a21eea1d48c25a6f38896dd11 (patch)
tree       e1bc3e69cbf01534e77a89e450ea047d261da510 /arch/sh/kernel/setup.c
parent     dfbca89987b74c34d9b1a2414b0e5ccee65347e0 (diff)
sh: Ensure active regions have a backing PMB entry.
In the NUMA or memory hot-add case where system memory has been
partitioned up, we immediately run into a situation where the existing
PMB entry doesn't cover the new range (primarily because the entry size
is shrunk to match the node size early in initialization). To fix this
up, it's necessary to preload a PMB mapping for the new range prior to
activation, in order to avoid a reset by MMU.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
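
As a quick illustration of the arithmetic this patch factors out, here is a
minimal user-space C sketch of the pfn-to-address conversion hoisted into the
start/end locals. The PAGE_SHIFT value and the example pfn range are
assumptions for illustration only, not taken from a real configuration:

#include <stdio.h>

/* Assumption for illustration: 4 KiB pages, i.e. PAGE_SHIFT == 12. */
#define PAGE_SHIFT 12

int main(void)
{
	/* Hypothetical node range, expressed in page frame numbers. */
	unsigned long start_pfn = 0x10000;
	unsigned long end_pfn   = 0x14000;

	/* The same shifts the patch moves into local variables. */
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end   = end_pfn << PAGE_SHIFT;

	/* struct resource ranges are inclusive, hence "end - 1". */
	printf("System RAM: 0x%08lx-0x%08lx (%lu MiB)\n",
	       start, end - 1, (end - start) >> 20);
	return 0;
}

Factoring start and end out once also lets the new pmb_bolt_mapping() call
below reuse the exact same values that the resource registration sees.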
Diffstat (limited to 'arch/sh/kernel/setup.c')
-rw-r--r--  arch/sh/kernel/setup.c  17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 61404ed0144..57bd93838f1 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -191,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 			       unsigned long end_pfn)
 {
 	struct resource *res = &mem_resources[nid];
+	unsigned long start, end;
 
 	WARN_ON(res->name); /* max one active range per node for now */
 
+	start = start_pfn << PAGE_SHIFT;
+	end = end_pfn << PAGE_SHIFT;
+
 	res->name = "System RAM";
-	res->start = start_pfn << PAGE_SHIFT;
-	res->end = (end_pfn << PAGE_SHIFT) - 1;
+	res->start = start;
+	res->end = end - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
 	if (request_resource(&iomem_resource, res)) {
 		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
 		       start_pfn, end_pfn);
@@ -213,6 +218,14 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 	request_resource(res, &data_resource);
 	request_resource(res, &bss_resource);
 
+	/*
+	 * Also make sure that there is a PMB mapping that covers this
+	 * range before we attempt to activate it, to avoid reset by MMU.
+	 * We can hit this path with NUMA or memory hot-add.
+	 */
+	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+			 PAGE_KERNEL);
+
 	add_active_range(nid, start_pfn, end_pfn);
 }
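
To make the "backing mapping" idea concrete, here is a small stand-alone C
sketch of the coverage problem the patch guards against. It is an
illustration only: struct bolted_map, covered(), and bolt() are hypothetical
stand-ins for the real PMB machinery in arch/sh/mm/pmb.c, and the 0x80000000
offset assumes sh's usual PAGE_OFFSET:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a bolted PMB entry; the real struct pmb_entry
 * in arch/sh/mm/pmb.c carries considerably more state. */
struct bolted_map {
	unsigned long virt;
	unsigned long phys;
	unsigned long size;
};

/* One pre-existing entry whose size was shrunk to match the boot node,
 * covering physical 0x08000000-0x0c000000 only. */
static struct bolted_map entries[8] = {
	{ 0x88000000UL, 0x08000000UL, 0x04000000UL },
};
static int nr_entries = 1;

/* Does some bolted entry fully cover [phys, phys + size)? */
static bool covered(unsigned long phys, unsigned long size)
{
	for (int i = 0; i < nr_entries; i++)
		if (phys >= entries[i].phys &&
		    phys + size <= entries[i].phys + entries[i].size)
			return true;
	return false;
}

/* Mimics the intent of pmb_bolt_mapping(): preload a mapping so the new
 * range is reachable before add_active_range() activates it. */
static void bolt(unsigned long virt, unsigned long phys, unsigned long size)
{
	if (covered(phys, size))
		return;
	entries[nr_entries++] = (struct bolted_map){ virt, phys, size };
	printf("bolted 0x%08lx -> phys 0x%08lx (%lu MiB)\n",
	       virt, phys, size >> 20);
}

int main(void)
{
	/* A hot-added node falling outside the shrunken boot-time entry. */
	unsigned long start = 0x0c000000UL, end = 0x10000000UL;

	/* __va() on sh boils down to phys + PAGE_OFFSET (0x80000000 here). */
	bolt(start + 0x80000000UL, start, end - start);
	return 0;
}

Without the bolt-before-activate step, touching the newly activated range
would fault through an MMU with no matching entry, which is the "reset by
MMU" the commit message refers to.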