Diffstat (limited to 'arch/x86_64/mm/numa.c'):
 arch/x86_64/mm/numa.c | 130 ++++++++++++++++++++++++-----------------
 1 file changed, 75 insertions(+), 55 deletions(-)
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 80a49d9bd8a..a828a01739c 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -38,38 +38,57 @@ cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
 int numa_off __initdata;
 
-int __init compute_hash_shift(struct node *nodes, int numnodes)
+
+/*
+ * Given a shift value, try to populate memnodemap[]
+ * Returns:
+ * 1 if OK
+ * 0 if memnodemap[] too small (or shift too small)
+ * -1 if node overlap or lost ram (shift too big)
+ */
+static int __init populate_memnodemap(
+        const struct node *nodes, int numnodes, int shift)
 {
         int i;
-        int shift = 20;
-        unsigned long addr,maxend=0;
-
-        for (i = 0; i < numnodes; i++)
-                if ((nodes[i].start != nodes[i].end) && (nodes[i].end > maxend))
-                        maxend = nodes[i].end;
+        int res = -1;
+        unsigned long addr, end;
 
-        while ((1UL << shift) < (maxend / NODEMAPSIZE))
-                shift++;
-
-        printk (KERN_DEBUG"Using %d for the hash shift. Max adder is %lx \n",
-                shift,maxend);
-        memset(memnodemap,0xff,sizeof(*memnodemap) * NODEMAPSIZE);
+        memset(memnodemap, 0xff, sizeof(memnodemap));
         for (i = 0; i < numnodes; i++) {
-                if (nodes[i].start == nodes[i].end)
+                addr = nodes[i].start;
+                end = nodes[i].end;
+                if (addr >= end)
                         continue;
-                for (addr = nodes[i].start;
-                     addr < nodes[i].end;
-                     addr += (1UL << shift)) {
-                        if (memnodemap[addr >> shift] != 0xff) {
-                                printk(KERN_INFO
-                        "Your memory is not aligned you need to rebuild your kernel "
-                        "with a bigger NODEMAPSIZE shift=%d adder=%lu\n",
-                                        shift,addr);
+                if ((end >> shift) >= NODEMAPSIZE)
+                        return 0;
+                do {
+                        if (memnodemap[addr >> shift] != 0xff)
                                 return -1;
-                        }
                         memnodemap[addr >> shift] = i;
-                }
+                        addr += (1UL << shift);
+                } while (addr < end);
+                res = 1;
         }
+        return res;
+}
+
+int __init compute_hash_shift(struct node *nodes, int numnodes)
+{
+        int shift = 20;
+
+        while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
+                shift++;
+
+        printk(KERN_DEBUG "Using %d for the hash shift.\n", shift);
+
+        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
+                printk(KERN_INFO
+                        "Your memory is not aligned, you need to rebuild your "
+                        "kernel with a bigger NODEMAPSIZE shift=%d\n", shift);
+                return -1;
+        }
         return shift;
 }
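
The rewrite separates concerns: populate_memnodemap() tries one candidate shift and reports whether the direct-mapped table can represent the nodes at that granularity, while compute_hash_shift() walks the shift upward until the next step would make distinct nodes collide. The winning table turns a physical-address-to-node lookup into one shift plus one byte load. A standalone userspace sketch of the same search (the NODEMAPSIZE value and the two 1GB example nodes are made up for the demo):

#include <stdio.h>
#include <string.h>

#define NODEMAPSIZE 0x1000              /* demo value, not the kernel's */

struct node { unsigned long start, end; };

static unsigned char memnodemap[NODEMAPSIZE];

/* Same contract as the patch: 1 = OK, 0 = table too small for this
 * shift, -1 = two nodes landed in the same slot (shift too big). */
static int populate(const struct node *nodes, int numnodes, int shift)
{
        int i, res = -1;
        unsigned long addr, end;

        memset(memnodemap, 0xff, sizeof(memnodemap));
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= NODEMAPSIZE)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != 0xff)
                                return -1;
                        memnodemap[addr >> shift] = i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

int main(void)
{
        /* Two fake 1GB nodes; the search should settle on shift 30. */
        struct node nodes[] = {
                { 0x00000000UL, 0x40000000UL },
                { 0x40000000UL, 0x80000000UL },
        };
        int shift = 20;

        while (populate(nodes, 2, shift + 1) >= 0)
                shift++;
        if (populate(nodes, 2, shift) != 1)   /* fill at the final shift */
                return 1;
        printf("shift=%d, node of 0x50000000 = %d\n",
               shift, memnodemap[0x50000000UL >> shift]);
        return 0;
}

The final populate() call mirrors the kernel's: the search only proves a shift works, so the table still has to be filled at the chosen granularity before lookups may use it.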
@@ -94,7 +113,6 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
         start_pfn = start >> PAGE_SHIFT;
         end_pfn = end >> PAGE_SHIFT;
 
-        memory_present(nodeid, start_pfn, end_pfn);
         nodedata_phys = find_e820_area(start, end, pgdat_size);
         if (nodedata_phys == -1L)
                 panic("Cannot find memory pgdat in node %d\n", nodeid);
@@ -132,29 +150,14 @@ void __init setup_node_zones(int nodeid)
         unsigned long start_pfn, end_pfn;
         unsigned long zones[MAX_NR_ZONES];
         unsigned long holes[MAX_NR_ZONES];
-        unsigned long dma_end_pfn;
 
-        memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
-        memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+        start_pfn = node_start_pfn(nodeid);
+        end_pfn = node_end_pfn(nodeid);
 
-        start_pfn = node_start_pfn(nodeid);
-        end_pfn = node_end_pfn(nodeid);
+        Dprintk(KERN_INFO "setting up node %d %lx-%lx\n",
+                nodeid, start_pfn, end_pfn);
 
-        Dprintk(KERN_INFO "setting up node %d %lx-%lx\n", nodeid, start_pfn, end_pfn);
-
-        /* All nodes > 0 have a zero length zone DMA */
-        dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-        if (start_pfn < dma_end_pfn) {
-                zones[ZONE_DMA] = dma_end_pfn - start_pfn;
-                holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
-                zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
-                holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
-
-        } else {
-                zones[ZONE_NORMAL] = end_pfn - start_pfn;
-                holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
-        }
-
+        size_zones(zones, holes, start_pfn, end_pfn);
         free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
                             start_pfn, holes);
 }
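
size_zones() itself is introduced elsewhere in this series, so only its call is visible here; judging from the removed lines, it centralizes the same ZONE_DMA/ZONE_NORMAL split at the 16MB ISA DMA boundary. A hedged reconstruction of the equivalent per-node logic (kernel context assumed; the real helper's body may differ):

/* Reconstruction from the removed lines above, not the real helper. */
static void size_zones_sketch(unsigned long *zones, unsigned long *holes,
                              unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;

        memset(zones, 0, MAX_NR_ZONES * sizeof(*zones));
        memset(holes, 0, MAX_NR_ZONES * sizeof(*holes));

        if (start_pfn < dma_end_pfn) {
                /* Node straddles the DMA boundary: split it in two. */
                zones[ZONE_DMA] = dma_end_pfn - start_pfn;
                holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
                zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
                holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
        } else {
                /* Nodes entirely above the boundary get no ZONE_DMA. */
                zones[ZONE_NORMAL] = end_pfn - start_pfn;
                holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
        }
}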
@@ -167,18 +170,16 @@ void __init numa_init_array(void)
            mapping. To avoid this fill in the mapping for all possible
            CPUs, as the number of CPUs is not known yet.
            We round robin the existing nodes. */
-        rr = 0;
+        rr = first_node(node_online_map);
         for (i = 0; i < NR_CPUS; i++) {
                 if (cpu_to_node[i] != NUMA_NO_NODE)
                         continue;
+                numa_set_node(i, rr);
                 rr = next_node(rr, node_online_map);
                 if (rr == MAX_NUMNODES)
                         rr = first_node(node_online_map);
-                cpu_to_node[i] = rr;
-                rr++;
         }
-        set_bit(0, &node_to_cpumask[cpu_to_node(0)]);
 }
 
 #ifdef CONFIG_NUMA_EMU
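
The old loop advanced rr with next_node() before assigning it and then bumped it again with rr++, so CPU 0 never received the first online node and a sparse mask could be stepped over; the fix assigns the current node first, then advances. A userspace sketch of the fixed ordering over a sparse online set {0, 2, 5} (the mask and helpers are stand-ins for the kernel's nodemask API):

#include <stdio.h>

#define NR_CPUS 8
#define MAX_NUMNODES 8

static int online[MAX_NUMNODES] = { 1, 0, 1, 0, 0, 1, 0, 0 };

static int first_node(void)
{
        int n;
        for (n = 0; n < MAX_NUMNODES; n++)
                if (online[n])
                        return n;
        return MAX_NUMNODES;
}

static int next_node(int n)
{
        for (n++; n < MAX_NUMNODES; n++)
                if (online[n])
                        return n;
        return MAX_NUMNODES;    /* caller wraps back to first_node() */
}

int main(void)
{
        int i, rr = first_node();

        for (i = 0; i < NR_CPUS; i++) {
                printf("cpu %d -> node %d\n", i, rr); /* assign, then advance */
                rr = next_node(rr);
                if (rr == MAX_NUMNODES)
                        rr = first_node();
        }
        return 0;
}

Run as-is this prints cpu 0 -> 0, cpu 1 -> 2, cpu 2 -> 5, then wraps back to node 0, which is exactly the distribution the patched kernel loop produces.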
@@ -207,8 +208,6 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
                 if (i == numa_fake-1)
                         sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
                 nodes[i].end = nodes[i].start + sz;
-                if (i != numa_fake-1)
-                        nodes[i].end--;
                 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
                        i,
                        nodes[i].start, nodes[i].end,
@@ -259,18 +258,22 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
         nodes_clear(node_online_map);
         node_set_online(0);
         for (i = 0; i < NR_CPUS; i++)
-                cpu_to_node[i] = 0;
+                numa_set_node(i, 0);
         node_to_cpumask[0] = cpumask_of_cpu(0);
         setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
 }
 
 __cpuinit void numa_add_cpu(int cpu)
 {
-        /* BP is initialized elsewhere */
-        if (cpu)
-                set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+        set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 }
 
+void __cpuinit numa_set_node(int cpu, int node)
+{
+        cpu_pda[cpu].nodenumber = node;
+        cpu_to_node[cpu] = node;
+}
+
 unsigned long __init numa_free_all_bootmem(void)
 {
         int i;
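
numa_set_node() exists so a CPU's home node is recorded in two places at once: the per-CPU pda field, which this series adds for cheap same-CPU lookups, and the global cpu_to_node[] table used for cross-CPU queries. A minimal sketch of the pair the setter keeps in lockstep (the struct is abbreviated to the one field involved, and the fast-path use of the pda is an assumption about the era's headers):

/* Sketch only: field and array names taken from the diff above. */
struct x8664_pda {
        int nodenumber;                 /* read by the owning CPU: fast path */
        /* ... other per-CPU fields ... */
};

extern struct x8664_pda cpu_pda[];
extern unsigned char cpu_to_node[];     /* readable from any CPU: slow path */

static void numa_set_node_sketch(int cpu, int node)
{
        cpu_pda[cpu].nodenumber = node; /* per-CPU copy */
        cpu_to_node[cpu] = node;        /* global table */
}

Updating only one of the two would let the per-CPU and global views of a CPU's node disagree, which is why callers above switch from raw cpu_to_node[i] stores to the setter.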
@@ -281,9 +284,26 @@ unsigned long __init numa_free_all_bootmem(void)
         return pages;
 }
 
+#ifdef CONFIG_SPARSEMEM
+static void __init arch_sparse_init(void)
+{
+        int i;
+
+        for_each_online_node(i)
+                memory_present(i, node_start_pfn(i), node_end_pfn(i));
+
+        sparse_init();
+}
+#else
+#define arch_sparse_init() do {} while (0)
+#endif
+
 void __init paging_init(void)
 {
         int i;
+
+        arch_sparse_init();
+
         for_each_online_node(i) {
                 setup_node_zones(i);
         }
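
Grouping the calls in arch_sparse_init() keeps the sparsemem-only memory_present() API out of the common setup_node_bootmem() path (which is why the earlier hunk deletes it there) and registers every online node in one pass right before the zones are built. A call-order sketch of the resulting boot step (names from the diff, bodies elided; not a literal copy of the file):

void __init paging_init_sketch(void)
{
        int i;

        /* 1. Declare every online node's pfn range to sparsemem,
         * 2. then let sparse_init() allocate the section mem_maps.
         * With CONFIG_SPARSEMEM disabled this compiles to a no-op. */
        arch_sparse_init();

        /* 3. Only now size and build each node's zones. */
        for_each_online_node(i)
                setup_node_zones(i);
}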