Diffstat (limited to 'arch/powerpc/mm/numa.c')
 arch/powerpc/mm/numa.c | 1057 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 861 insertions(+), 196 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b037d95eead..3b181b22cd4 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -13,17 +13,31 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
+#include <linux/cpuset.h>
+#include <linux/node.h>
+#include <linux/stop_machine.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
-#include <asm/system.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/topology.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#include <asm/setup.h>
+#include <asm/vdso.h>
static int numa_enabled = 1;
@@ -33,17 +47,45 @@ static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
+static int form1_affinity;
-static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
+#define MAX_DISTANCE_REF_POINTS 4
+static int distance_ref_points_depth;
+static const __be32 *distance_ref_points;
+static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: cpumask_of_node() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+ unsigned int node;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
+
+ /* allocate the map */
+ for (node = 0; node < nr_node_ids; node++)
+ alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+ /* cpumask_of_node() will now work */
+ dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
+static int __init fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
unsigned long long mem;
@@ -91,75 +133,68 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
}
/*
- * get_active_region_work_fn - A helper function for get_node_active_region
- * Returns datax set to the start_pfn and end_pfn if they contain
- * the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- * goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
+ * get_node_active_region - Return active region containing pfn
+ * Active range returned is empty if none found.
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
*/
-static int __init get_active_region_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
+static void __init get_node_active_region(unsigned long pfn,
+ struct node_active_region *node_ar)
{
- struct node_active_region *data;
- data = (struct node_active_region *)datax;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
- if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
- data->start_pfn = start_pfn;
- data->end_pfn = end_pfn;
- return 1;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ if (pfn >= start_pfn && pfn < end_pfn) {
+ node_ar->nid = nid;
+ node_ar->start_pfn = start_pfn;
+ node_ar->end_pfn = end_pfn;
+ break;
+ }
}
- return 0;
-
}
-/*
- * get_node_active_region - Return active region containing start_pfn
- * Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
- */
-static void __init get_node_active_region(unsigned long start_pfn,
- struct node_active_region *node_ar)
+static void reset_numa_cpu_lookup_table(void)
{
- int nid = early_pfn_to_nid(start_pfn);
+ unsigned int cpu;
- node_ar->nid = nid;
- node_ar->start_pfn = start_pfn;
- node_ar->end_pfn = start_pfn;
- work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+ for_each_possible_cpu(cpu)
+ numa_cpu_lookup_table[cpu] = -1;
}
-static void __cpuinit map_cpu_to_node(int cpu, int node)
+static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+}
+
+static void map_cpu_to_node(int cpu, int node)
+{
+ update_numa_cpu_lookup_table(cpu, node);
dbg("adding cpu %d to node %d\n", cpu, node);
- if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
- cpu_set(cpu, numa_cpumask_lookup_table[node]);
+ if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+ cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
int node = numa_cpu_lookup_table[cpu];
dbg("removing cpu %lu from node %d\n", cpu, node);
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
- cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+ if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node);
}
}
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
-static const int *of_get_associativity(struct device_node *dev)
+static const __be32 *of_get_associativity(struct device_node *dev)
{
return of_get_property(dev, "ibm,associativity", NULL);
}
@@ -169,41 +204,91 @@ static const int *of_get_associativity(struct device_node *dev)
* it exists (the property exists only in kexec/kdump kernels,
* added by kexec-tools)
*/
-static const u32 *of_get_usable_memory(struct device_node *memory)
+static const __be32 *of_get_usable_memory(struct device_node *memory)
{
- const u32 *prop;
+ const __be32 *prop;
u32 len;
prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
if (!prop || len < sizeof(unsigned int))
- return 0;
+ return NULL;
return prop;
}
+int __node_distance(int a, int b)
+{
+ int i;
+ int distance = LOCAL_DISTANCE;
+
+ if (!form1_affinity)
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
+ break;
+
+ /* Double the distance for each NUMA level */
+ distance *= 2;
+ }
+
+ return distance;
+}
+EXPORT_SYMBOL(__node_distance);
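
For illustration (not part of the patch), a minimal userspace sketch of
the form 1 doubling scheme in __node_distance(), using a made-up
two-level distance table for two nodes:

	#include <stdio.h>

	#define LOCAL_DISTANCE	10

	int main(void)
	{
		/* hypothetical per-node domain ids at two reference levels */
		int table[2][2] = { { 1, 5 }, { 2, 5 } };
		int depth = 2, distance = LOCAL_DISTANCE, i;

		for (i = 0; i < depth; i++) {
			if (table[0][i] == table[1][i])
				break;		/* first common level found */
			distance *= 2;		/* double per differing level */
		}
		printf("node_distance(0, 1) = %d\n", distance);	/* 20 */
		return 0;
	}

The two sample nodes differ at the most significant reference level and
match at the second, so the loop doubles once and reports 20.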
+
+static void initialize_distance_lookup_table(int nid,
+ const __be32 *associativity)
+{
+ int i;
+
+ if (!form1_affinity)
+ return;
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ const __be32 *entry;
+
+ entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+ distance_lookup_table[nid][i] = of_read_number(entry, 1);
+ }
+}
+
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
* info is found.
*/
-static int of_node_to_nid_single(struct device_node *device)
+static int associativity_to_nid(const __be32 *associativity)
{
int nid = -1;
- const unsigned int *tmp;
if (min_common_depth == -1)
goto out;
- tmp = of_get_associativity(device);
- if (!tmp)
- goto out;
-
- if (tmp[0] >= min_common_depth)
- nid = tmp[min_common_depth];
+ if (of_read_number(associativity, 1) >= min_common_depth)
+ nid = of_read_number(&associativity[min_common_depth], 1);
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = -1;
+
+ if (nid > 0 &&
+ of_read_number(associativity, 1) >= distance_ref_points_depth)
+ initialize_distance_lookup_table(nid, associativity);
+
out:
return nid;
}
+/* Returns the nid associated with the given device tree node,
+ * or -1 if not found.
+ */
+static int of_node_to_nid_single(struct device_node *device)
+{
+ int nid = -1;
+ const __be32 *tmp;
+
+ tmp = of_get_associativity(device);
+ if (tmp)
+ nid = associativity_to_nid(tmp);
+ return nid;
+}
+
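
A minimal sketch (with made-up cells) of the nid extraction that
associativity_to_nid() performs: cell 0 of "ibm,associativity" holds
the number of domains that follow, and the node id sits at index
min_common_depth, assumed to be 4 here:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* device-tree cells are big-endian */

	int main(void)
	{
		uint32_t prop[] = { htonl(4), htonl(0), htonl(0),
				    htonl(1), htonl(2) };
		int min_common_depth = 4;	/* assumed */
		int nid = -1;

		if (ntohl(prop[0]) >= min_common_depth)
			nid = ntohl(prop[min_common_depth]);
		printf("nid = %d\n", nid);	/* prints 2 */
		return 0;
	}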
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
@@ -226,50 +311,75 @@ int of_node_to_nid(struct device_node *device)
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
-/*
- * In theory, the "ibm,associativity" property may contain multiple
- * associativity lists because a resource may be multiply connected
- * into the machine. This resource then has different associativity
- * characteristics relative to its multiple connections. We ignore
- * this for now. We also assume that all cpu and memory sets have
- * their distances represented at a common level. This won't be
- * true for hierarchical NUMA.
- *
- * In any case the ibm,associativity-reference-points should give
- * the correct depth for a normal NUMA system.
- *
- * - Dave Hansen <haveblue@us.ibm.com>
- */
static int __init find_min_common_depth(void)
{
int depth;
- const unsigned int *ref_points;
- struct device_node *rtas_root;
- unsigned int len;
-
- rtas_root = of_find_node_by_path("/rtas");
+ struct device_node *root;
- if (!rtas_root)
- return -1;
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ root = of_find_node_by_path("/ibm,opal");
+ else
+ root = of_find_node_by_path("/rtas");
+ if (!root)
+ root = of_find_node_by_path("/");
/*
- * this property is 2 32-bit integers, each representing a level of
- * depth in the associativity nodes. The first is for an SMP
- * configuration (should be all 0's) and the second is for a normal
- * NUMA configuration.
+ * This property is a set of 32-bit integers, each representing
+ * an index into the ibm,associativity nodes.
+ *
+ * With form 0 affinity the first integer is for an SMP configuration
+ * (should be all 0's) and the second is for a normal NUMA
+ * configuration. We have only one level of NUMA.
+ *
+ * With form 1 affinity the first integer is the most significant
+ * NUMA boundary and the following are progressively less significant
+ * boundaries. There can be more than one level of NUMA.
*/
- ref_points = of_get_property(rtas_root,
- "ibm,associativity-reference-points", &len);
+ distance_ref_points = of_get_property(root,
+ "ibm,associativity-reference-points",
+ &distance_ref_points_depth);
- if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
- depth = ref_points[1];
- } else {
+ if (!distance_ref_points) {
dbg("NUMA: ibm,associativity-reference-points not found.\n");
- depth = -1;
+ goto err;
+ }
+
+ distance_ref_points_depth /= sizeof(int);
+
+ if (firmware_has_feature(FW_FEATURE_OPAL) ||
+ firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
+ dbg("Using form 1 affinity\n");
+ form1_affinity = 1;
+ }
+
+ if (form1_affinity) {
+ depth = of_read_number(distance_ref_points, 1);
+ } else {
+ if (distance_ref_points_depth < 2) {
+ printk(KERN_WARNING "NUMA: "
+ "short ibm,associativity-reference-points\n");
+ goto err;
+ }
+
+ depth = of_read_number(&distance_ref_points[1], 1);
+ }
+
+ /*
+ * Warn and cap if the hardware supports more than
+ * MAX_DISTANCE_REF_POINTS domains.
+ */
+ if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
+ printk(KERN_WARNING "NUMA: distance array capped at "
+ "%d entries\n", MAX_DISTANCE_REF_POINTS);
+ distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
}
- of_node_put(rtas_root);
+ of_node_put(root);
return depth;
+
+err:
+ of_node_put(root);
+ return -1;
}
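
A small sketch of the depth selection above, assuming hypothetical
reference-point cells <4 2>: form 1 takes the first cell, form 0 the
second.

	#include <stdio.h>

	int main(void)
	{
		int ref_points[] = { 4, 2 };	/* made-up cells */
		int form1_affinity = 1;
		int depth = form1_affinity ? ref_points[0] : ref_points[1];

		printf("min_common_depth = %d\n", depth);	/* prints 4 */
		return 0;
	}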
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
@@ -285,65 +395,53 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
of_node_put(memory);
}
-static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const __be32 **buf)
{
unsigned long result = 0;
while (n--) {
- result = (result << 32) | **buf;
+ result = (result << 32) | of_read_number(*buf, 1);
(*buf)++;
}
return result;
}
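
A standalone sketch of read_n_cells() assembling one 64-bit value from
two big-endian cells; the sample cells are made up:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static uint64_t read_n_cells(int n, const uint32_t **buf)
	{
		uint64_t result = 0;

		while (n--) {
			result = (result << 32) | ntohl(**buf);
			(*buf)++;
		}
		return result;
	}

	int main(void)
	{
		uint32_t cells[] = { htonl(0x1), htonl(0x20000000) };
		const uint32_t *p = cells;

		/* prints 0x120000000 */
		printf("0x%llx\n", (unsigned long long)read_n_cells(2, &p));
		return 0;
	}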
-struct of_drconf_cell {
- u64 base_addr;
- u32 drc_index;
- u32 reserved;
- u32 aa_index;
- u32 flags;
-};
-
-#define DRCONF_MEM_ASSIGNED 0x00000008
-#define DRCONF_MEM_AI_INVALID 0x00000040
-#define DRCONF_MEM_RESERVED 0x00000080
-
/*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
* and return the information in the provided of_drconf_cell structure.
*/
-static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
+static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
- const u32 *cp;
+ const __be32 *cp;
drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
cp = *cellp;
- drmem->drc_index = cp[0];
- drmem->reserved = cp[1];
- drmem->aa_index = cp[2];
- drmem->flags = cp[3];
+ drmem->drc_index = of_read_number(cp, 1);
+ drmem->reserved = of_read_number(&cp[1], 1);
+ drmem->aa_index = of_read_number(&cp[2], 1);
+ drmem->flags = of_read_number(&cp[3], 1);
*cellp = cp + 4;
}
/*
- * Retreive and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
*
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries. Each lmb list entry
- * contains information as layed out in the of_drconf_cell struct above.
+ * The layout of the ibm,dynamic-memory property is a count N followed
+ * by N memblock list entries. Each memblock list entry contains
+ * information as laid out in the of_drconf_cell struct.
*/
-static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
+static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
- const u32 *prop;
+ const __be32 *prop;
u32 len, entries;
prop = of_get_property(memory, "ibm,dynamic-memory", &len);
if (!prop || len < sizeof(unsigned int))
return 0;
- entries = *prop++;
+ entries = of_read_number(prop++, 1);
/* Now that we know the number of entries, revalidate the size
* of the property read in to ensure we have everything
@@ -356,12 +454,12 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
}
/*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
* from the device tree.
*/
static u64 of_get_lmb_size(struct device_node *memory)
{
- const u32 *prop;
+ const __be32 *prop;
u32 len;
prop = of_get_property(memory, "ibm,lmb-size", &len);
@@ -374,11 +472,11 @@ static u64 of_get_lmb_size(struct device_node *memory)
struct assoc_arrays {
u32 n_arrays;
u32 array_sz;
- const u32 *arrays;
+ const __be32 *arrays;
};
/*
- * Retreive and validate the list of associativity arrays for drconf
+ * Retrieve and validate the list of associativity arrays for drconf
* memory from the ibm,associativity-lookup-arrays property of the
* device tree..
*
@@ -390,17 +488,17 @@ struct assoc_arrays {
static int of_get_assoc_arrays(struct device_node *memory,
struct assoc_arrays *aa)
{
- const u32 *prop;
+ const __be32 *prop;
u32 len;
prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
if (!prop || len < 2 * sizeof(unsigned int))
return -1;
- aa->n_arrays = *prop++;
- aa->array_sz = *prop++;
+ aa->n_arrays = of_read_number(prop++, 1);
+ aa->array_sz = of_read_number(prop++, 1);
- /* Now that we know the number of arrrays and size of each array,
+ /* Now that we know the number of arrays and size of each array,
* revalidate the size of the property read in.
*/
if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
@@ -425,7 +523,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
!(drmem->flags & DRCONF_MEM_AI_INVALID) &&
drmem->aa_index < aa->n_arrays) {
index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
- nid = aa->arrays[index];
+ nid = of_read_number(&aa->arrays[index], 1);
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
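
As a hypothetical worked example of the indexing above: with
aa_index = 2, array_sz = 5 and min_common_depth = 4, the node id is
read from cell 2 * 5 + 4 - 1 = 13 of the flattened lookup-array
property.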
@@ -438,20 +536,33 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
-static int __cpuinit numa_setup_cpu(unsigned long lcpu)
+static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = 0;
- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
+ int nid;
+ struct device_node *cpu;
+
+ /*
+ * If a valid cpu-to-node mapping is already available, use it
+ * directly instead of querying the firmware, since it represents
+ * the most recent mapping notified to us by the platform (e.g. VPHN).
+ */
+ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ map_cpu_to_node(lcpu, nid);
+ return nid;
+ }
+
+ cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
+ nid = 0;
goto out;
}
nid = of_node_to_nid_single(cpu);
if (nid < 0 || !node_online(nid))
- nid = any_online_node(NODE_MASK_ALL);
+ nid = first_online_node;
out:
map_cpu_to_node(lcpu, nid);
@@ -460,17 +571,38 @@ out:
return nid;
}
-static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
- unsigned long action,
+static void verify_cpu_node_mapping(int cpu, int node)
+{
+ int base, sibling, i;
+
+ /* Verify that all the threads in the core belong to the same node */
+ base = cpu_first_thread_sibling(cpu);
+
+ for (i = 0; i < threads_per_core; i++) {
+ sibling = base + i;
+
+ if (sibling == cpu || cpu_is_offline(sibling))
+ continue;
+
+ if (cpu_to_node(sibling) != node) {
+ WARN(1, "CPU thread siblings %d and %d don't belong"
+ " to the same node!\n", cpu, sibling);
+ break;
+ }
+ }
+}
+
+static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned long lcpu = (unsigned long)hcpu;
- int ret = NOTIFY_DONE;
+ int ret = NOTIFY_DONE, nid;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- numa_setup_cpu(lcpu);
+ nid = numa_setup_cpu(lcpu);
+ verify_cpu_node_mapping((int)lcpu, nid);
ret = NOTIFY_OK;
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -492,32 +624,32 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
* Returns the size the region should have to enforce the memory limit.
* This will either be the original value of size, a truncated value,
* or zero. If the returned value of size is 0 the region should be
- * discarded as it lies wholy above the memory limit.
+ * discarded as it lies wholly above the memory limit.
*/
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
unsigned long size)
{
/*
- * We use lmb_end_of_DRAM() in here instead of memory_limit because
+ * We use memblock_end_of_DRAM() in here instead of memory_limit because
* we've already adjusted it for the limit and it takes care of
* having memory holes below the limit. Also, in the case of
* iommu_is_off, memory_limit is not set but is implicitly enforced.
*/
- if (start + size <= lmb_end_of_DRAM())
+ if (start + size <= memblock_end_of_DRAM())
return size;
- if (start >= lmb_end_of_DRAM())
+ if (start >= memblock_end_of_DRAM())
return 0;
- return lmb_end_of_DRAM() - start;
+ return memblock_end_of_DRAM() - start;
}
/*
* Reads the counter for a given entry in
* linux,drconf-usable-memory property
*/
-static inline int __init read_usm_ranges(const u32 **usm)
+static inline int __init read_usm_ranges(const __be32 **usm)
{
/*
* For each lmb in ibm,dynamic-memory a corresponding
@@ -534,11 +666,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
- const u32 *dm, *usm;
+ const __be32 *uninitialized_var(dm), *usm;
unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz;
int nid;
- struct assoc_arrays aa;
+ struct assoc_arrays aa = { .arrays = NULL };
n = of_get_drconf_memory(memory, &dm);
if (!n)
@@ -589,17 +721,15 @@ static void __init parse_drconf_memory(struct device_node *memory)
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
- add_active_range(nid, base >> PAGE_SHIFT,
- (base >> PAGE_SHIFT)
- + (sz >> PAGE_SHIFT));
+ memblock_set_node(base, sz,
+ &memblock.memory, nid);
} while (--ranges);
}
}
static int __init parse_numa_properties(void)
{
- struct device_node *cpu = NULL;
- struct device_node *memory = NULL;
+ struct device_node *memory;
int default_nid = 0;
unsigned long i;
@@ -621,6 +751,7 @@ static int __init parse_numa_properties(void)
* each node to be onlined must have NODE_DATA etc backing it.
*/
for_each_present_cpu(i) {
+ struct device_node *cpu;
int nid;
cpu = of_get_cpu_node(i, NULL);
@@ -639,13 +770,13 @@ static int __init parse_numa_properties(void)
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
- memory = NULL;
- while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+
+ for_each_node_by_type(memory, "memory") {
unsigned long start;
unsigned long size;
int nid;
int ranges;
- const unsigned int *memcell_buf;
+ const __be32 *memcell_buf;
unsigned int len;
memcell_buf = of_get_property(memory,
@@ -681,16 +812,16 @@ new_range:
continue;
}
- add_active_range(nid, start >> PAGE_SHIFT,
- (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ memblock_set_node(start, size, &memblock.memory, nid);
if (--ranges)
goto new_range;
}
/*
- * Now do the same thing for each LMB listed in the ibm,dynamic-memory
- * property in the ibm,dynamic-reconfiguration-memory node.
+ * Now do the same thing for each MEMBLOCK listed in the
+ * ibm,dynamic-memory property in the
+ * ibm,dynamic-reconfiguration-memory node.
*/
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory)
@@ -701,22 +832,25 @@ new_range:
static void __init setup_nonnuma(void)
{
- unsigned long top_of_ram = lmb_end_of_DRAM();
- unsigned long total_ram = lmb_phys_mem_size();
+ unsigned long top_of_ram = memblock_end_of_DRAM();
+ unsigned long total_ram = memblock_phys_mem_size();
unsigned long start_pfn, end_pfn;
- unsigned int i, nid = 0;
+ unsigned int nid = 0;
+ struct memblock_region *reg;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
- for (i = 0; i < lmb.memory.cnt; ++i) {
- start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
- end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ for_each_memblock(memory, reg) {
+ start_pfn = memblock_region_memory_base_pfn(reg);
+ end_pfn = memblock_region_memory_end_pfn(reg);
fake_numa_create_new_node(end_pfn, &nid);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn),
+ &memblock.memory, nid);
node_set_online(nid);
}
}
@@ -737,8 +871,9 @@ void __init dump_numa_cpu_topology(void)
* If we used a CPU iterator here we would miss printing
* the holes in the cpumap.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (cpumask_test_cpu(cpu,
+ node_to_cpumask_map[node])) {
if (count == 0)
printk(" %u", cpu);
++count;
@@ -750,7 +885,7 @@ void __init dump_numa_cpu_topology(void)
}
if (count > 1)
- printk("-%u", NR_CPUS - 1);
+ printk("-%u", nr_cpu_ids - 1);
printk("\n");
}
}
@@ -770,7 +905,7 @@ static void __init dump_numa_memory_topology(void)
count = 0;
- for (i = 0; i < lmb_end_of_DRAM();
+ for (i = 0; i < memblock_end_of_DRAM();
i += (1 << SECTION_SIZE_BITS)) {
if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
if (count == 0)
@@ -790,7 +925,7 @@ static void __init dump_numa_memory_topology(void)
}
/*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
* required. nid is the preferred node and end is the physical address of
* the highest address in the node.
*
@@ -804,11 +939,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
int new_nid;
unsigned long ret_paddr;
- ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+ ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
if (!ret_paddr)
- ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+ ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
if (!ret_paddr)
panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -818,14 +953,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
/*
* We initialize the nodes in numeric order: 0, 1, 2...
- * and hand over control from the LMB allocator to the
+ * and hand over control from the MEMBLOCK allocator to the
* bootmem allocator. If this function is called for
* node 5, then we know that all nodes <5 are using the
- * bootmem allocator instead of the LMB allocator.
+ * bootmem allocator instead of the MEMBLOCK allocator.
*
* So, check the nid from which this allocation came
* and double check to see if we need to use bootmem
- * instead of the LMB. We don't free the LMB memory
+ * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
* since it would be useless.
*/
new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
@@ -840,27 +975,26 @@ static void __init *careful_zallocation(int nid, unsigned long size,
return ret;
}
-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+static struct notifier_block ppc64_numa_nb = {
.notifier_call = cpu_numa_callback,
.priority = 1 /* Must run before sched domains notifier. */
};
-static void mark_reserved_regions_for_nid(int nid)
+static void __init mark_reserved_regions_for_nid(int nid)
{
struct pglist_data *node = NODE_DATA(nid);
- int i;
+ struct memblock_region *reg;
- for (i = 0; i < lmb.reserved.cnt; i++) {
- unsigned long physbase = lmb.reserved.region[i].base;
- unsigned long size = lmb.reserved.region[i].size;
+ for_each_memblock(reserved, reg) {
+ unsigned long physbase = reg->base;
+ unsigned long size = reg->size;
unsigned long start_pfn = physbase >> PAGE_SHIFT;
unsigned long end_pfn = PFN_UP(physbase + size);
struct node_active_region node_ar;
- unsigned long node_end_pfn = node->node_start_pfn +
- node->node_spanned_pages;
+ unsigned long node_end_pfn = pgdat_end_pfn(node);
/*
- * Check to make sure that this lmb.reserved area is
+ * Check to make sure that this memblock.reserved area is
* within the bounds of the node that we care about.
* Checking the nid of the start and end points is not
* sufficient because the reserved area could span the
@@ -918,7 +1052,7 @@ void __init do_init_bootmem(void)
int nid;
min_low_pfn = 0;
- max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
max_pfn = max_low_pfn;
if (parse_numa_properties())
@@ -926,10 +1060,6 @@ void __init do_init_bootmem(void)
else
dump_numa_memory_topology();
- register_cpu_notifier(&ppc64_numa_nb);
- cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
- (void *)(unsigned long)boot_cpuid);
-
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
void *bootmem_vaddr;
@@ -983,13 +1113,24 @@ void __init do_init_bootmem(void)
}
init_bootmem_done = 1;
+
+ /*
+ * Now bootmem is initialised we can create the node to cpumask
+ * lookup tables and setup the cpu callback to populate them.
+ */
+ setup_node_to_cpumask_map();
+
+ reset_numa_cpu_lookup_table();
+ register_cpu_notifier(&ppc64_numa_nb);
+ cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+ (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
free_area_init_nodes(max_zone_pfns);
}
@@ -1021,7 +1162,7 @@ early_param("numa", early_numa);
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
unsigned long scn_addr)
{
- const u32 *dm;
+ const __be32 *dm;
unsigned int drconf_cell_cnt, rc;
unsigned long lmb_size;
struct assoc_arrays aa;
@@ -1064,17 +1205,17 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
/*
* Find the node associated with a hot added memory section for memory
* represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
*/
-int hot_add_node_scn_to_nid(unsigned long scn_addr)
+static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
- struct device_node *memory = NULL;
+ struct device_node *memory;
int nid = -1;
- while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+ for_each_node_by_type(memory, "memory") {
unsigned long start, size;
int ranges;
- const unsigned int *memcell_buf;
+ const __be32 *memcell_buf;
unsigned int len;
memcell_buf = of_get_property(memory, "reg", &len);
@@ -1095,18 +1236,19 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
break;
}
- of_node_put(memory);
if (nid >= 0)
break;
}
+ of_node_put(memory);
+
return nid;
}
/*
* Find the node associated with a hot added memory section. Section
- * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
*/
int hot_add_scn_to_nid(unsigned long scn_addr)
{
@@ -1114,7 +1256,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
int nid, found = 0;
if (!numa_enabled || (min_common_depth < 0))
- return any_online_node(NODE_MASK_ALL);
+ return first_online_node;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory) {
@@ -1125,7 +1267,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
}
if (nid < 0 || !node_online(nid))
- nid = any_online_node(NODE_MASK_ALL);
+ nid = first_online_node;
if (NODE_DATA(nid)->node_spanned_pages)
return nid;
@@ -1141,4 +1283,527 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid;
}
+static u64 hot_add_drconf_memory_max(void)
+{
+ struct device_node *memory = NULL;
+ unsigned int drconf_cell_cnt = 0;
+ u64 lmb_size = 0;
+ const __be32 *dm = NULL;
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+ lmb_size = of_get_lmb_size(memory);
+ of_node_put(memory);
+ }
+ return lmb_size * drconf_cell_cnt;
+}
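
As a hypothetical worked example: a partition reporting 32
ibm,dynamic-memory entries with a 16MB LMB size would get a hot-add
ceiling of 32 * 16MB = 512MB from this function.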
+
+/*
+ * memory_hotplug_max - return max address of memory that may be added
+ *
+ * This is currently only used on systems that support drconfig memory
+ * hotplug.
+ */
+u64 memory_hotplug_max(void)
+{
+ return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
+}
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/* Virtual Processor Home Node (VPHN) support */
+#ifdef CONFIG_PPC_SPLPAR
+struct topology_update_data {
+ struct topology_update_data *next;
+ unsigned int cpu;
+ int old_nid;
+ int new_nid;
+};
+
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
+static cpumask_t cpu_associativity_changes_mask;
+static int vphn_enabled;
+static int prrn_enabled;
+static void reset_topology_timer(void);
+
+/*
+ * Store the current values of the associativity change counters in the
+ * hypervisor.
+ */
+static void setup_cpu_associativity_change_counters(void)
+{
+ int cpu;
+
+ /* The VPHN feature supports a maximum of 8 reference points */
+ BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
+
+ for_each_possible_cpu(cpu) {
+ int i;
+ u8 *counts = vphn_cpu_change_counts[cpu];
+ volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+ for (i = 0; i < distance_ref_points_depth; i++)
+ counts[i] = hypervisor_counts[i];
+ }
+}
+
+/*
+ * The hypervisor maintains a set of 8 associativity change counters in
+ * the VPA of each cpu that correspond to the associativity levels in the
+ * ibm,associativity-reference-points property. When an associativity
+ * level changes, the corresponding counter is incremented.
+ *
+ * Set a bit in cpu_associativity_changes_mask for each cpu whose home
+ * node associativity levels have changed.
+ *
+ * Returns the number of cpus with unhandled associativity changes.
+ */
+static int update_cpu_associativity_changes_mask(void)
+{
+ int cpu;
+ cpumask_t *changes = &cpu_associativity_changes_mask;
+
+ for_each_possible_cpu(cpu) {
+ int i, changed = 0;
+ u8 *counts = vphn_cpu_change_counts[cpu];
+ volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (hypervisor_counts[i] != counts[i]) {
+ counts[i] = hypervisor_counts[i];
+ changed = 1;
+ }
+ }
+ if (changed) {
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ }
+ }
+
+ return cpumask_weight(changes);
+}
+
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
+
+/*
+ * Convert the associativity domain numbers returned from the hypervisor
+ * to the sequence they would appear in the ibm,associativity property.
+ */
+static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
+{
+ int i, nr_assoc_doms = 0;
+ const __be16 *field = (const __be16 *) packed;
+
+#define VPHN_FIELD_UNUSED (0xffff)
+#define VPHN_FIELD_MSB (0x8000)
+#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
+
+ for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
+ if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
+ /* All significant fields processed, and remaining
+ * fields contain the reserved value of all 1's.
+ * Just store them.
+ */
+ unpacked[i] = *((__be32 *)field);
+ field += 2;
+ } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
+ /* Data is in the lower 15 bits of this field */
+ unpacked[i] = cpu_to_be32(
+ be16_to_cpup(field) & VPHN_FIELD_MASK);
+ field++;
+ nr_assoc_doms++;
+ } else {
+ /* Data is in the lower 15 bits of this field
+ * concatenated with the next 16 bit field
+ */
+ unpacked[i] = *((__be32 *)field);
+ field += 2;
+ nr_assoc_doms++;
+ }
+ }
+
+ /* The first cell contains the length of the property */
+ unpacked[0] = cpu_to_be32(nr_assoc_doms);
+
+ return nr_assoc_doms;
+}
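
A standalone sketch of the field decoding above, using made-up values
in host byte order for simplicity: a 16-bit field with the MSB set
carries a 15-bit domain number on its own, two adjacent fields without
it concatenate into one 32-bit number, and 0xffff marks the unused
tail:

	#include <stdio.h>
	#include <stdint.h>

	#define VPHN_FIELD_UNUSED	0xffff
	#define VPHN_FIELD_MSB		0x8000
	#define VPHN_FIELD_MASK		0x7fff

	int main(void)
	{
		uint16_t fields[] = { 0x8002, 0x0001, 0x0005, 0xffff };
		unsigned int i = 0, n = sizeof(fields) / sizeof(fields[0]);

		while (i < n) {
			if (fields[i] == VPHN_FIELD_UNUSED) {
				break;	/* reserved all-ones tail */
			} else if (fields[i] & VPHN_FIELD_MSB) {
				/* one field, 15 significant bits */
				printf("0x%x\n", fields[i] & VPHN_FIELD_MASK);
				i += 1;
			} else {
				/* two fields form one 32-bit value */
				printf("0x%x\n",
				       ((uint32_t)fields[i] << 16) | fields[i + 1]);
				i += 2;
			}
		}
		return 0;	/* prints 0x2, then 0x10005 */
	}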
+
+/*
+ * Retrieve the new associativity information for a virtual processor's
+ * home node.
+ */
+static long hcall_vphn(unsigned long cpu, __be32 *associativity)
+{
+ long rc;
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ u64 flags = 1;
+ int hwcpu = get_hard_smp_processor_id(cpu);
+
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+ vphn_unpack_associativity(retbuf, associativity);
+
+ return rc;
+}
+
+static long vphn_get_associativity(unsigned long cpu,
+ __be32 *associativity)
+{
+ long rc;
+
+ rc = hcall_vphn(cpu, associativity);
+
+ switch (rc) {
+ case H_FUNCTION:
+ printk(KERN_INFO
+ "VPHN is not supported. Disabling polling...\n");
+ stop_topology_update();
+ break;
+ case H_HARDWARE:
+ printk(KERN_ERR
+ "hcall_vphn() experienced a hardware fault "
+ "preventing VPHN. Disabling polling...\n");
+ stop_topology_update();
+ }
+
+ return rc;
+}
+
+/*
+ * Update the CPU maps and sysfs entries for a single CPU when its NUMA
+ * characteristics change. This function doesn't perform any locking and is
+ * only safe to call from stop_machine().
+ */
+static int update_cpu_topology(void *data)
+{
+ struct topology_update_data *update;
+ unsigned long cpu;
+
+ if (!data)
+ return -EINVAL;
+
+ cpu = smp_processor_id();
+
+ for (update = data; update; update = update->next) {
+ if (cpu != update->cpu)
+ continue;
+
+ unmap_cpu_from_node(update->cpu);
+ map_cpu_to_node(update->cpu, update->new_nid);
+ vdso_getcpu_init();
+ }
+
+ return 0;
+}
+
+static int update_lookup_table(void *data)
+{
+ struct topology_update_data *update;
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Upon topology update, the numa-cpu lookup table needs to be updated
+ * for all threads in the core, including offline CPUs, to ensure that
+ * future hotplug operations respect the cpu-to-node associativity
+ * properly.
+ */
+ for (update = data; update; update = update->next) {
+ int nid, base, j;
+
+ nid = update->new_nid;
+ base = cpu_first_thread_sibling(update->cpu);
+
+ for (j = 0; j < threads_per_core; j++) {
+ update_numa_cpu_lookup_table(base + j, nid);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
+ */
+int arch_update_cpu_topology(void)
+{
+ unsigned int cpu, sibling, changed = 0;
+ struct topology_update_data *updates, *ud;
+ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ cpumask_t updated_cpus;
+ struct device *dev;
+ int weight, new_nid, i = 0;
+
+ weight = cpumask_weight(&cpu_associativity_changes_mask);
+ if (!weight)
+ return 0;
+
+ updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
+ if (!updates)
+ return 0;
+
+ cpumask_clear(&updated_cpus);
+
+ for_each_cpu(cpu, &cpu_associativity_changes_mask) {
+ /*
+ * If siblings aren't flagged for changes, the updates list
+ * will be too short. Skip this update and flag the siblings
+ * for the next update.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
+
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
+
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
+ }
+
+ /*
+ * In cases where we have nothing to update (because the updates list
+ * is too short or because the new topology is same as the old one),
+ * skip invoking update_cpu_topology() via stop-machine(). This is
+ * necessary (and not just a fast-path optimization) since stop-machine
+ * can end up electing a random CPU to run update_cpu_topology(), and
+ * thus trick us into setting up incorrect cpu-node mappings (since
+ * 'updates' is kzalloc()'ed).
+ *
+ * And for the same reason, we skip all the subsequent updates as well.
+ */
+ if (!cpumask_weight(&updated_cpus))
+ goto out;
+
+ stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+
+ /*
+ * Update the numa-cpu lookup table with the new mappings, even for
+ * offline CPUs. It is best to perform this update from the stop-
+ * machine context.
+ */
+ stop_machine(update_lookup_table, &updates[0],
+ cpumask_of(raw_smp_processor_id()));
+
+ for (ud = &updates[0]; ud; ud = ud->next) {
+ unregister_cpu_under_node(ud->cpu, ud->old_nid);
+ register_cpu_under_node(ud->cpu, ud->new_nid);
+
+ dev = get_cpu_device(ud->cpu);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
+ changed = 1;
+ }
+
+out:
+ kfree(updates);
+ return changed;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+ rebuild_sched_domains();
+}
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+static void topology_schedule_update(void)
+{
+ schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+ if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
+ topology_schedule_update();
+ else if (vphn_enabled) {
+ if (update_cpu_associativity_changes_mask() > 0)
+ topology_schedule_update();
+ reset_topology_timer();
+ }
+}
+static struct timer_list topology_timer =
+ TIMER_INITIALIZER(topology_timer_fn, 0, 0);
+
+static void reset_topology_timer(void)
+{
+ topology_timer.data = 0;
+ topology_timer.expires = jiffies + 60 * HZ;
+ mod_timer(&topology_timer, topology_timer.expires);
+}
+
+#ifdef CONFIG_SMP
+
+static void stage_topology_update(int core_id)
+{
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
+ reset_topology_timer();
+}
+
+static int dt_update_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct of_prop_reconfig *update;
+ int rc = NOTIFY_DONE;
+
+ switch (action) {
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ update = (struct of_prop_reconfig *)data;
+ if (!of_prop_cmp(update->dn->type, "cpu") &&
+ !of_prop_cmp(update->prop->name, "ibm,associativity")) {
+ u32 core_id;
+ of_property_read_u32(update->dn, "reg", &core_id);
+ stage_topology_update(core_id);
+ rc = NOTIFY_OK;
+ }
+ break;
+ }
+
+ return rc;
+}
+
+static struct notifier_block dt_update_nb = {
+ .notifier_call = dt_update_callback,
+};
+
+#endif
+
+/*
+ * Start polling for associativity changes.
+ */
+int start_topology_update(void)
+{
+ int rc = 0;
+
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+ vphn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_register(&dt_update_nb);
+#endif
+ }
+ } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
+ lppaca_shared_proc(get_lppaca())) {
+ if (!vphn_enabled) {
+ prrn_enabled = 0;
+ vphn_enabled = 1;
+ setup_cpu_associativity_change_counters();
+ init_timer_deferrable(&topology_timer);
+ reset_topology_timer();
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Disable polling for VPHN associativity changes.
+ */
+int stop_topology_update(void)
+{
+ int rc = 0;
+
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_unregister(&dt_update_nb);
+#endif
+ } else if (vphn_enabled) {
+ vphn_enabled = 0;
+ rc = del_timer_sync(&topology_timer);
+ }
+
+ return rc;
+}
+
+int prrn_is_enabled(void)
+{
+ return prrn_enabled;
+}
+
+static int topology_read(struct seq_file *file, void *v)
+{
+ if (vphn_enabled || prrn_enabled)
+ seq_puts(file, "on\n");
+ else
+ seq_puts(file, "off\n");
+
+ return 0;
+}
+
+static int topology_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, topology_read, NULL);
+}
+
+static ssize_t topology_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ char kbuf[4]; /* "on" or "off" plus null. */
+ int read_len;
+
+ read_len = count < 3 ? count : 3;
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EINVAL;
+
+ kbuf[read_len] = '\0';
+
+ if (!strncmp(kbuf, "on", 2))
+ start_topology_update();
+ else if (!strncmp(kbuf, "off", 3))
+ stop_topology_update();
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations topology_ops = {
+ .read = seq_read,
+ .write = topology_write,
+ .open = topology_open,
+ .release = single_release
+};
+
+static int topology_update_init(void)
+{
+ start_topology_update();
+ proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
+
+ return 0;
+}
+device_initcall(topology_update_init);
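
Usage note: once this initcall has run, reading
/proc/powerpc/topology_updates reports "on" or "off", and writing one
of those strings starts or stops the polling via topology_write()
above.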
+#endif /* CONFIG_PPC_SPLPAR */