Diffstat (limited to 'include')
-rw-r--r--   include/linux/cpuset.h |  2
-rw-r--r--   include/linux/mmzone.h | 85
2 files changed, 82 insertions, 5 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 4d8adf66368..748d2c99663 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,6 +23,7 @@ extern void cpuset_fork(struct task_struct *p);
 extern void cpuset_exit(struct task_struct *p);
 extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 #define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -83,6 +84,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 	return node_possible_map;
 }
 
+#define cpuset_current_mems_allowed (node_online_map)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 #define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e06683e2bea..09bf9d8d7b7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -288,19 +288,94 @@ struct zone {
  */
 #define DEF_PRIORITY 12
 
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ *    up short of free memory since the last time (last_fullzone_zap)
+ *    we zero'd fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ *    id, so that we can efficiently evaluate whether that node is
+ *    set in the current tasks mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zones offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans.  During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set.  During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * reexaming zones for free memory when they just came up low on
+ * memory momentarilly ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist.  However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter).  A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here.  We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct.  This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ *  1) The full, fixed length version, shown below, and
+ *  2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it.  This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
+	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
+	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
  * allocation, the other zones are fallback zones, in decreasing
  * priority.
  *
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
  */
+
 struct zonelist {
-	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
+	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
+#ifdef CONFIG_NUMA
+	struct zonelist_cache zlcache;			     // optional ...
+#endif
 };
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
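For context, the sketch below restates how a first-pass allocator scan could consult the zonelist_cache introduced above: skip zones whose fullzones bit is set or whose cached node id is not in the current task's mems_allowed, and zap the bitmap roughly once per second. The helper names (zlc_sketch_*) are assumptions for exposition only, not the interface the patch adds to mm/page_alloc.c.

/*
 * Illustrative sketch only -- helper names and exact placement are
 * assumed for exposition, not taken from mm/page_alloc.c.
 */
#include <linux/bitmap.h>
#include <linux/cpuset.h>
#include <linux/jiffies.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

/* Forget which zones looked full if the last zap was over a second ago. */
static void zlc_sketch_maybe_zap(struct zonelist_cache *zlc)
{
	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}
}

/*
 * First pass: consult the cache and the cpuset's allowed nodes without
 * touching the zones themselves; a second pass would ignore the cache
 * and examine every zone on the zonelist.
 */
static struct zone *zlc_sketch_first_pass(struct zonelist *zonelist)
{
	struct zonelist_cache *zlc = zonelist->zlcache_ptr;
	nodemask_t allowed = cpuset_current_mems_allowed;
	int i;

	if (zlc)				/* NULL for MPOL_BIND zonelists */
		zlc_sketch_maybe_zap(zlc);

	for (i = 0; zonelist->zones[i] != NULL; i++) {
		if (zlc) {
			if (test_bit(i, zlc->fullzones))
				continue;	/* recently seen full */
			if (!node_isset(zlc->z_to_n[i], allowed))
				continue;	/* node not in mems_allowed */
		}
		/*
		 * Try to allocate from zonelist->zones[i] here; on failure,
		 * set bit i in zlc->fullzones so later scans skip this zone.
		 */
	}
	return NULL;
}

In the real patch this logic belongs to get_page_from_freelist(); the sketch only spells out the skip and zap rules described in the comment block above.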