From 454e2398be9b9fa30433fccc548db34d19aa9958 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2006 02:02:57 -0700 Subject: [PATCH] VFS: Permit filesystem to override root dentry on mount Extend the get_sb() filesystem operation to take an extra argument that permits the VFS to pass in the target vfsmount that defines the mountpoint. The filesystem is then required to manually set the superblock and root dentry pointers. For most filesystems, this should be done with simple_set_mnt(), which will set the superblock pointer and then set the root dentry to the superblock's s_root (as per the old default behaviour). The get_sb() op now returns an integer as there's now no need to return the superblock pointer. This patch permits a superblock to be implicitly shared amongst several mount points, such as can be done with NFS to avoid potential inode aliasing. In such a case, simple_set_mnt() would not be called, and instead the mnt_root and mnt_sb would be set directly. The patch also makes the following changes: (*) The get_sb_*() convenience functions in the core kernel now take a vfsmount pointer argument and return an integer, so most filesystems have to change very little. (*) If one of the convenience functions is not used, then get_sb() should normally call simple_set_mnt() to instantiate the vfsmount. This will always return 0, and so can be tail-called from get_sb(). (*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the dcache upon superblock destruction rather than shrink_dcache_anon(). This is required because the superblock may now have multiple trees that aren't actually bound to s_root, but that still need to be cleaned up. The currently called functions assume that the whole tree is rooted at s_root and that anonymous dentries are not the roots of trees, which results in dentries being left unculled. However, with the way NFS superblock sharing is currently set to be implemented, these assumptions are violated: the root of the filesystem is simply a dummy dentry and inode (the real inode for '/' may well be inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries with child trees. [*] Anonymous until discovered from another tree. (*) The documentation has been adjusted accordingly, including changing ext2_* to foo_* in its examples. 
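As a sketch of the new convention (the foo_* names are placeholders, as in the updated documentation; foo_set_super() and foo_fill_super() stand in for a filesystem's own helpers, and error handling is abbreviated), a get_sb() implementation that does not use one of the get_sb_*() helpers fills its superblock much as before and then tail-calls simple_set_mnt():

static int foo_get_sb(struct file_system_type *fs_type, int flags,
		      const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *sb;
	int err;

	/* Obtain or allocate the superblock as before. */
	sb = sget(fs_type, NULL, foo_set_super, data);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		sb->s_flags = flags;
		err = foo_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			up_write(&sb->s_umount);
			deactivate_super(sb);
			return err;
		}
		sb->s_flags |= MS_ACTIVE;
	}

	/* Points mnt->mnt_sb at sb and mnt->mnt_root at dget(sb->s_root);
	 * always returns 0, so it can be the tail call. */
	return simple_set_mnt(mnt, sb);
}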
[akpm@osdl.org: convert ipath_fs, do other stuff] Signed-off-by: David Howells Acked-by: Al Viro Cc: Nathan Scott Cc: Roland Dreier Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 1e43c8a865b..7617bb1c6bf 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2233,10 +2233,10 @@ static struct vm_operations_struct shmem_vm_ops = { }; -static struct super_block *shmem_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int shmem_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_sb_nodev(fs_type, flags, data, shmem_fill_super); + return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); } static struct file_system_type tmpfs_fs_type = { -- cgit v1.2.3-18-g5258 From 726c334223180e3c0197cc980a432681370d4baf Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2006 02:02:58 -0700 Subject: [PATCH] VFS: Permit filesystem to perform statfs with a known root dentry Give the statfs superblock operation a dentry pointer rather than a superblock pointer. This complements the get_sb() patch. That reduced the significance of sb->s_root, allowing NFS to place a fake root there. However, NFS does require a dentry to use as a target for the statfs operation. This permits the root in the vfsmount to be used instead. linux/mount.h has been added where necessary to make allyesconfig build successfully. Interest has also been expressed for use with the FUSE and XFS filesystems. Signed-off-by: David Howells Acked-by: Al Viro Cc: Nathan Scott Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 7617bb1c6bf..10020d8b407 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1654,9 +1654,9 @@ static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos, return desc.error; } -static int shmem_statfs(struct super_block *sb, struct kstatfs *buf) +static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) { - struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); buf->f_type = TMPFS_MAGIC; buf->f_bsize = PAGE_CACHE_SIZE; -- cgit v1.2.3-18-g5258 From cb2b95e1c6b56e3d2369d3a5f4bc97f4fa180683 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 23 Jun 2006 02:03:01 -0700 Subject: [PATCH] zone handle unaligned zone boundaries The buddy allocator has a requirement that boundaries between contigious zones occur aligned with the the MAX_ORDER ranges. Where they do not we will incorrectly merge pages cross zone boundaries. This can lead to pages from the wrong zone being handed out. Originally the buddy allocator would check that buddies were in the same zone by referencing the zone start and end page frame numbers. This was removed as it became very expensive and the buddy allocator already made the assumption that zones boundaries were aligned. It is clear that not all configurations and architectures are honouring this alignment requirement. Therefore it seems safest to reintroduce support for non-aligned zone boundaries. This patch introduces a new check when considering a page a buddy it compares the zone_table index for the two pages and refuses to merge the pages where they do not match. 
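For illustration only (this helper is not part of the patch), the allocator locates a buddy by flipping bit 'order' of the page index, which is why an unaligned zone boundary lets a block be paired with pages from the neighbouring zone:

/*
 * Sketch of the buddy arithmetic behind the problem.  With a zone boundary
 * at pfn 6, the order-1 block {4,5} belongs to one zone while its computed
 * buddy {6,7} belongs to the next; merging them would hand out an order-2
 * block that straddles the two zones.  Comparing the zone of a page and its
 * buddy, as page_is_buddy() now does, refuses exactly that merge.
 */
static inline unsigned long buddy_index(unsigned long page_idx,
					unsigned int order)
{
	return page_idx ^ (1UL << order);
}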
The zone_table index is unique for each node/zone combination when FLATMEM/DISCONTIGMEM is enabled and for each section/zone combination when SPARSEMEM is enabled (a SPARSEMEM section is at least a MAX_ORDER size). Signed-off-by: Andy Whitcroft Cc: Dave Hansen Cc: Mel Gorman Cc: Yasunori Goto Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 253a450c400..fd631c2536a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -286,22 +286,27 @@ __find_combined_index(unsigned long page_idx, unsigned int order) * we can do coalesce a page and its buddy if * (a) the buddy is not in a hole && * (b) the buddy is in the buddy system && - * (c) a page and its buddy have the same order. + * (c) a page and its buddy have the same order && + * (d) a page and its buddy are in the same zone. * * For recording whether a page is in the buddy system, we use PG_buddy. * Setting, clearing, and testing PG_buddy is serialized by zone->lock. * * For recording page's order, we use page_private(page). */ -static inline int page_is_buddy(struct page *page, int order) +static inline int page_is_buddy(struct page *page, struct page *buddy, + int order) { #ifdef CONFIG_HOLES_IN_ZONE - if (!pfn_valid(page_to_pfn(page))) + if (!pfn_valid(page_to_pfn(buddy))) return 0; #endif - if (PageBuddy(page) && page_order(page) == order) { - BUG_ON(page_count(page) != 0); + if (page_zone_id(page) != page_zone_id(buddy)) + return 0; + + if (PageBuddy(buddy) && page_order(buddy) == order) { + BUG_ON(page_count(buddy) != 0); return 1; } return 0; @@ -352,7 +357,7 @@ static inline void __free_one_page(struct page *page, struct page *buddy; buddy = __page_find_buddy(page, page_idx, order); - if (!page_is_buddy(buddy, order)) + if (!page_is_buddy(page, buddy, order)) break; /* Move the buddy up one level. */ list_del(&buddy->lru); -- cgit v1.2.3-18-g5258 From 4da5eda0dca9730f59f391230304526ab4bffec7 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 23 Jun 2006 02:03:04 -0700 Subject: [PATCH] Page Migration: Make do_swap_page redo the fault It is better to redo the complete fault if do_swap_page() finds that the page is not in PageSwapCache() because the page migration code may have replaced the swap pte already with a pte pointing to valid memory. do_swap_page() may interpret an invalid swap entry without this patch because we do not reload the pte if we are looping back. The page migration code may already have reused the swap entry referenced by our local swp_entry. Signed-off-by: Christoph Lameter Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 0ec7bc64427..7e3683fd4f3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1879,7 +1879,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out; entry = pte_to_swp_entry(orig_pte); -again: page = lookup_swap_cache(entry); if (!page) { swapin_readahead(entry, address, vma); @@ -1903,12 +1902,6 @@ again: mark_page_accessed(page); lock_page(page); - if (!PageSwapCache(page)) { - /* Page migration has occured */ - unlock_page(page); - page_cache_release(page); - goto again; - } /* * Back out if somebody else already faulted in this pte. 
-- cgit v1.2.3-18-g5258 From 729bd0b74ce9ac6c829109052fcd565f5c366ca5 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 23 Jun 2006 02:03:05 -0700 Subject: [PATCH] slab: extract cache_free_alien from __cache_free Move alien object freeing to cache_free_alien() to reduce #ifdef clutter in __cache_free(). Signed-off-by: Pekka Enberg Acked-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 77 ++++++++++++++++++++++++++++++++++----------------------------- 1 file changed, 42 insertions(+), 35 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index f1b644eb39d..bf05ea900ce 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1024,6 +1024,40 @@ static void drain_alien_cache(struct kmem_cache *cachep, } } } + +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + struct slab *slabp = virt_to_slab(objp); + int nodeid = slabp->nodeid; + struct kmem_list3 *l3; + struct array_cache *alien = NULL; + + /* + * Make sure we are not freeing a object from another node to the array + * cache on this cpu. + */ + if (likely(slabp->nodeid == numa_node_id())) + return 0; + + l3 = cachep->nodelists[numa_node_id()]; + STATS_INC_NODEFREES(cachep); + if (l3->alien && l3->alien[nodeid]) { + alien = l3->alien[nodeid]; + spin_lock(&alien->lock); + if (unlikely(alien->avail == alien->limit)) { + STATS_INC_ACOVERFLOW(cachep); + __drain_alien_cache(cachep, alien, nodeid); + } + alien->entry[alien->avail++] = objp; + spin_unlock(&alien->lock); + } else { + spin_lock(&(cachep->nodelists[nodeid])->list_lock); + free_block(cachep, &objp, 1, nodeid); + spin_unlock(&(cachep->nodelists[nodeid])->list_lock); + } + return 1; +} + #else #define drain_alien_cache(cachep, alien) do { } while (0) @@ -1038,6 +1072,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr) { } +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + return 0; +} + #endif static int cpuup_callback(struct notifier_block *nfb, @@ -3087,41 +3126,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); - /* Make sure we are not freeing a object from another - * node to the array cache on this cpu. - */ -#ifdef CONFIG_NUMA - { - struct slab *slabp; - slabp = virt_to_slab(objp); - if (unlikely(slabp->nodeid != numa_node_id())) { - struct array_cache *alien = NULL; - int nodeid = slabp->nodeid; - struct kmem_list3 *l3; - - l3 = cachep->nodelists[numa_node_id()]; - STATS_INC_NODEFREES(cachep); - if (l3->alien && l3->alien[nodeid]) { - alien = l3->alien[nodeid]; - spin_lock(&alien->lock); - if (unlikely(alien->avail == alien->limit)) { - STATS_INC_ACOVERFLOW(cachep); - __drain_alien_cache(cachep, - alien, nodeid); - } - alien->entry[alien->avail++] = objp; - spin_unlock(&alien->lock); - } else { - spin_lock(&(cachep->nodelists[nodeid])-> - list_lock); - free_block(cachep, &objp, 1, nodeid); - spin_unlock(&(cachep->nodelists[nodeid])-> - list_lock); - } - return; - } - } -#endif + if (cache_free_alien(cachep, objp)) + return; + if (likely(ac->avail < ac->limit)) { STATS_INC_FREEHIT(cachep); ac->entry[ac->avail++] = objp; -- cgit v1.2.3-18-g5258 From 4776874ff096cd410382c0eca5d75f69c9dfa58f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 23 Jun 2006 02:03:07 -0700 Subject: [PATCH] slab: page mapping cleanup Clean up slab allocator page mapping a bit. 
The memory allocated for a slab is physically contiguous so it is okay to assume struct pages are too so kill the long-standing comment. Furthermore, rename set_slab_attr to slab_map_pages and add a comment explaining why its needed. Signed-off-by: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index bf05ea900ce..a94cf0fea8a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2499,23 +2499,28 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, slabp->inuse--; } -static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, - void *objp) +/* + * Map pages beginning at addr to the given cache and slab. This is required + * for the slab allocator to be able to lookup the cache and slab of a + * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. + */ +static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, + void *addr) { - int i; + int nr_pages; struct page *page; - /* Nasty!!!!!! I hope this is OK. */ - page = virt_to_page(objp); + page = virt_to_page(addr); - i = 1; + nr_pages = 1; if (likely(!PageCompound(page))) - i <<= cachep->gfporder; + nr_pages <<= cache->gfporder; + do { - page_set_cache(page, cachep); - page_set_slab(page, slabp); + page_set_cache(page, cache); + page_set_slab(page, slab); page++; - } while (--i); + } while (--nr_pages); } /* @@ -2587,7 +2592,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) goto opps1; slabp->nodeid = nodeid; - set_slab_attr(cachep, slabp, objp); + slab_map_pages(cachep, slabp, objp); cache_init_objs(cachep, slabp, ctor_flags); -- cgit v1.2.3-18-g5258 From 3c5a87f476bed45616e7e543dcaea4440c77bf93 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 23 Jun 2006 02:03:08 -0700 Subject: [PATCH] migration: remove unnecessary PageSwapCache checks Remove two unnecessary PageSwapCache checks. The page refcount is raised and therefore page migration cannot occur in both functions. 
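Put differently (an illustrative helper, not the kernel's migration code): migration only proceeds when it can account for every reference to the page, so a caller that has already taken its own reference has effectively pinned the page until it drops that reference:

/*
 * Illustration of the invariant relied on above: the migration path backs
 * off unless the page's reference count matches exactly the references it
 * knows about, so the extra reference held by these callers keeps the page
 * and its swap cache entry stable.
 */
static inline int migration_backs_off(struct page *page, int known_refs)
{
	return page_count(page) != known_refs;
}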
Signed-off-by: Christoph Lameter Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 8 -------- mm/swapfile.c | 7 ------- 2 files changed, 15 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index 10020d8b407..84b5cf9b63c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1081,14 +1081,6 @@ repeat: page_cache_release(swappage); goto repeat; } - if (!PageSwapCache(swappage)) { - /* Page migration has occured */ - shmem_swp_unmap(entry); - spin_unlock(&info->lock); - unlock_page(swappage); - page_cache_release(swappage); - goto repeat; - } if (PageWriteback(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); diff --git a/mm/swapfile.c b/mm/swapfile.c index e5fd5385f0c..47a6812f5f8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -716,7 +716,6 @@ static int try_to_unuse(unsigned int type) */ swap_map = &si->swap_map[i]; entry = swp_entry(type, i); -again: page = read_swap_cache_async(entry, NULL, 0); if (!page) { /* @@ -751,12 +750,6 @@ again: wait_on_page_locked(page); wait_on_page_writeback(page); lock_page(page); - if (!PageSwapCache(page)) { - /* Page migration has occured */ - unlock_page(page); - page_cache_release(page); - goto again; - } wait_on_page_writeback(page); /* -- cgit v1.2.3-18-g5258 From 02b694dea473ad3db1e2d1b14c1fef8fbd92e5e6 Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Fri, 23 Jun 2006 02:03:08 -0700 Subject: [PATCH] wait_table and zonelist initializing for memory hotadd: change name of wait_table_size() This is just to rename from wait_table_size() to wait_table_hash_nr_entries(). Signed-off-by: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fd631c2536a..27320a0542d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1727,7 +1727,7 @@ void __init build_all_zonelists(void) */ #define PAGES_PER_WAITQUEUE 256 -static inline unsigned long wait_table_size(unsigned long pages) +static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { unsigned long size = 1; @@ -2019,13 +2019,15 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) * The per-page waitqueue mechanism uses hashed waitqueues * per zone. */ - zone->wait_table_size = wait_table_size(zone_size_pages); - zone->wait_table_bits = wait_table_bits(zone->wait_table_size); + zone->wait_table_hash_nr_entries = + wait_table_hash_nr_entries(zone_size_pages); + zone->wait_table_bits = + wait_table_bits(zone->wait_table_hash_nr_entries); zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node(pgdat, zone->wait_table_size + alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t)); - for(i = 0; i < zone->wait_table_size; ++i) + for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) init_waitqueue_head(zone->wait_table + i); } -- cgit v1.2.3-18-g5258 From 86356ab147669bd3bcb2149fd9561d1280835c24 Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Fri, 23 Jun 2006 02:03:09 -0700 Subject: [PATCH] wait_table and zonelist initializing for memory hotadd: change to meminit for build_zonelist Change definitions of some functions and data from __init to __meminit. These functions and data can be used after bootup by this patch to be used for hot-add codes. 
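For context (approximate definitions; the real ones live in include/linux/init.h and may differ by version), __meminit and __meminitdata fall back to __init and __initdata when CONFIG_MEMORY_HOTPLUG is not set, so this code is still discarded after boot on kernels that cannot hot-add memory and only stays resident where it can actually be called later:

#ifdef CONFIG_MEMORY_HOTPLUG
#define __meminit			/* kept in regular .text */
#define __meminitdata			/* kept in regular .data */
#else
#define __meminit	__init		/* freed with the init sections */
#define __meminitdata	__initdata
#endif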
Signed-off-by: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 27320a0542d..5ae75bead4d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -83,8 +83,8 @@ EXPORT_SYMBOL(zone_table); static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" }; int min_free_kbytes = 1024; -unsigned long __initdata nr_kernel_pages; -unsigned long __initdata nr_all_pages; +unsigned long __meminitdata nr_kernel_pages; +unsigned long __meminitdata nr_all_pages; #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) @@ -1517,7 +1517,7 @@ void show_free_areas(void) * * Add all populated zones of a node to the zonelist. */ -static int __init build_zonelists_node(pg_data_t *pgdat, +static int __meminit build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int nr_zones, int zone_type) { struct zone *zone; @@ -1553,7 +1553,7 @@ static inline int highest_zone(int zone_bits) #ifdef CONFIG_NUMA #define MAX_NODE_LOAD (num_online_nodes()) -static int __initdata node_load[MAX_NUMNODES]; +static int __meminitdata node_load[MAX_NUMNODES]; /** * find_next_best_node - find the next node that should appear in a given node's fallback list * @node: node whose fallback list we're appending @@ -1568,7 +1568,7 @@ static int __initdata node_load[MAX_NUMNODES]; * on them otherwise. * It returns -1 if no node is found. */ -static int __init find_next_best_node(int node, nodemask_t *used_node_mask) +static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) { int n, val; int min_val = INT_MAX; @@ -1614,7 +1614,7 @@ static int __init find_next_best_node(int node, nodemask_t *used_node_mask) return best_node; } -static void __init build_zonelists(pg_data_t *pgdat) +static void __meminit build_zonelists(pg_data_t *pgdat) { int i, j, k, node, local_node; int prev_node, load; @@ -1666,7 +1666,7 @@ static void __init build_zonelists(pg_data_t *pgdat) #else /* CONFIG_NUMA */ -static void __init build_zonelists(pg_data_t *pgdat) +static void __meminit build_zonelists(pg_data_t *pgdat) { int i, j, k, node, local_node; @@ -2071,7 +2071,7 @@ static __meminit void init_currently_empty_zone(struct zone *zone, * - mark all memory queues empty * - clear the memory bitmaps */ -static void __init free_area_init_core(struct pglist_data *pgdat, +static void __meminit free_area_init_core(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) { unsigned long j; @@ -2159,7 +2159,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) #endif /* CONFIG_FLAT_NODE_MEM_MAP */ } -void __init free_area_init_node(int nid, struct pglist_data *pgdat, +void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, unsigned long *zones_size, unsigned long node_start_pfn, unsigned long *zholes_size) { -- cgit v1.2.3-18-g5258 From 718127cc3170454f4aa274fdd2f1e01574fecd66 Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Fri, 23 Jun 2006 02:03:10 -0700 Subject: [PATCH] wait_table and zonelist initializing for memory hotadd: add return code for init_current_empty_zone When add_zone() is called against empty zone (not populated zone), we have to initialize the zone which didn't initialize at boot time. But, init_currently_empty_zone() may fail due to allocation of wait table. So, this patch is to catch its error code. 
Changes against wait_table is in the next patch. Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 15 +++++++++++++-- mm/page_alloc.c | 11 ++++++++--- 2 files changed, 21 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 70df5c0d957..71da5c98c9c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -26,7 +26,7 @@ extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn, unsigned long size); -static void __add_zone(struct zone *zone, unsigned long phys_start_pfn) +static int __add_zone(struct zone *zone, unsigned long phys_start_pfn) { struct pglist_data *pgdat = zone->zone_pgdat; int nr_pages = PAGES_PER_SECTION; @@ -34,8 +34,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn) int zone_type; zone_type = zone - pgdat->node_zones; + if (!populated_zone(zone)) { + int ret = 0; + ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages); + if (ret < 0) + return ret; + } memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn); zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages); + return 0; } extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, @@ -50,7 +57,11 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn) if (ret < 0) return ret; - __add_zone(zone, phys_start_pfn); + ret = __add_zone(zone, phys_start_pfn); + + if (ret < 0) + return ret; + return register_new_memory(__pfn_to_section(phys_start_pfn)); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5ae75bead4d..4bc66f6b771 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2050,8 +2050,9 @@ static __meminit void zone_pcp_init(struct zone *zone) zone->name, zone->present_pages, batch); } -static __meminit void init_currently_empty_zone(struct zone *zone, - unsigned long zone_start_pfn, unsigned long size) +__meminit int init_currently_empty_zone(struct zone *zone, + unsigned long zone_start_pfn, + unsigned long size) { struct pglist_data *pgdat = zone->zone_pgdat; @@ -2063,6 +2064,8 @@ static __meminit void init_currently_empty_zone(struct zone *zone, memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); zone_init_free_lists(pgdat, zone, zone->spanned_pages); + + return 0; } /* @@ -2077,6 +2080,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, unsigned long j; int nid = pgdat->node_id; unsigned long zone_start_pfn = pgdat->node_start_pfn; + int ret; pgdat_resize_init(pgdat); pgdat->nr_zones = 0; @@ -2118,7 +2122,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, continue; zonetable_add(zone, nid, j, zone_start_pfn, size); - init_currently_empty_zone(zone, zone_start_pfn, size); + ret = init_currently_empty_zone(zone, zone_start_pfn, size); + BUG_ON(ret); zone_start_pfn += size; } } -- cgit v1.2.3-18-g5258 From cca448fe92246fb59efe55ba2e048ded0971a9af Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Fri, 23 Jun 2006 02:03:10 -0700 Subject: [PATCH] wait_table and zonelist initializing for memory hotadd: wait_table initialization Wait_table is initialized according to zone size at boot time. But, we cannot know the maixmum zone size when memory hotplug is enabled. It can be changed.... And resizing of wait_table is hard. So kernel allocate and initialzie wait_table as its maximum size. 
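For context on what is being sized here: the table backs the hashed per-page wait queues, which lock_page() and wait_on_page_bit() reach by hashing the struct page pointer into the zone's table, essentially as page_waitqueue() in mm/filemap.c does:

/*
 * Paraphrase of the consumer of wait_table/wait_table_bits: more entries
 * mean fewer unrelated pages sharing a wait queue head, at the cost of the
 * memory whose allocation is reworked below.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}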
Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4bc66f6b771..62564e27b44 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1727,6 +1727,7 @@ void __init build_all_zonelists(void) */ #define PAGES_PER_WAITQUEUE 256 +#ifndef CONFIG_MEMORY_HOTPLUG static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { unsigned long size = 1; @@ -1745,6 +1746,29 @@ static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) return max(size, 4UL); } +#else +/* + * A zone's size might be changed by hot-add, so it is not possible to determine + * a suitable size for its wait_table. So we use the maximum size now. + * + * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: + * + * i386 (preemption config) : 4096 x 16 = 64Kbyte. + * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. + * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. + * + * The maximum entries are prepared when a zone's memory is (512K + 256) pages + * or more by the traditional way. (See above). It equals: + * + * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. + * ia64(16K page size) : = ( 8G + 4M)byte. + * powerpc (64K page size) : = (32G +16M)byte. + */ +static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) +{ + return 4096UL; +} +#endif /* * This is an integer logarithm so that shifts can be used later @@ -2010,10 +2034,11 @@ void __init setup_per_cpu_pageset(void) #endif static __meminit -void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) +int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) { int i; struct pglist_data *pgdat = zone->zone_pgdat; + size_t alloc_size; /* * The per-page waitqueue mechanism uses hashed waitqueues @@ -2023,12 +2048,32 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) wait_table_hash_nr_entries(zone_size_pages); zone->wait_table_bits = wait_table_bits(zone->wait_table_hash_nr_entries); - zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries - * sizeof(wait_queue_head_t)); + alloc_size = zone->wait_table_hash_nr_entries + * sizeof(wait_queue_head_t); + + if (system_state == SYSTEM_BOOTING) { + zone->wait_table = (wait_queue_head_t *) + alloc_bootmem_node(pgdat, alloc_size); + } else { + /* + * This case means that a zone whose size was 0 gets new memory + * via memory hot-add. + * But it may be the case that a new node was hot-added. In + * this case vmalloc() will not be able to use this new node's + * memory - this wait_table must be initialized to use this new + * node itself as well. + * To use this new node's memory, further consideration will be + * necessary. 
+ */ + zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); + } + if (!zone->wait_table) + return -ENOMEM; for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) init_waitqueue_head(zone->wait_table + i); + + return 0; } static __meminit void zone_pcp_init(struct zone *zone) @@ -2055,8 +2100,10 @@ __meminit int init_currently_empty_zone(struct zone *zone, unsigned long size) { struct pglist_data *pgdat = zone->zone_pgdat; - - zone_wait_table_init(zone, size); + int ret; + ret = zone_wait_table_init(zone, size); + if (ret) + return ret; pgdat->nr_zones = zone_idx(zone) + 1; zone->zone_start_pfn = zone_start_pfn; -- cgit v1.2.3-18-g5258 From 6811378e7d8b9aa4fca2a1ca73d24c9d67c9cb12 Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Fri, 23 Jun 2006 02:03:11 -0700 Subject: [PATCH] wait_table and zonelist initializing for memory hotadd: update zonelists In current code, zonelist is considered to be build once, no modification. But MemoryHotplug can add new zone/pgdat. It must be updated. This patch modifies build_all_zonelists(). By this, build_all_zonelist() can reconfig pgdat's zonelists. To update them safety, this patch use stop_machine_run(). Other cpus don't touch among updating them by using it. In old version (V2 of node hotadd), kernel updated them after zone initialization. But present_page of its new zone is still 0, because online_page() is not called yet at this time. Build_zonelists() checks present_pages to find present zone. It was too early. So, I changed it after online_pages(). Signed-off-by: Yasunori Goto Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 12 ++++++++++++ mm/page_alloc.c | 26 +++++++++++++++++++++----- 2 files changed, 33 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 71da5c98c9c..1b1ac3db218 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -127,6 +127,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) unsigned long flags; unsigned long onlined_pages = 0; struct zone *zone; + int need_zonelists_rebuild = 0; /* * This doesn't need a lock to do pfn_to_page(). @@ -139,6 +140,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages); pgdat_resize_unlock(zone->zone_pgdat, &flags); + /* + * If this zone is not populated, then it is not in zonelist. + * This means the page allocator ignores this zone. + * So, zonelist must be updated after online. 
+ */ + if (!populated_zone(zone)) + need_zonelists_rebuild = 1; + for (i = 0; i < nr_pages; i++) { struct page *page = pfn_to_page(pfn + i); online_page(page); @@ -149,5 +158,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) setup_per_zone_pages_min(); + if (need_zonelists_rebuild) + build_all_zonelists(); + return 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 62564e27b44..9dfbe6b7d1c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -1704,14 +1705,29 @@ static void __meminit build_zonelists(pg_data_t *pgdat) #endif /* CONFIG_NUMA */ -void __init build_all_zonelists(void) +/* return values int ....just for stop_machine_run() */ +static int __meminit __build_all_zonelists(void *dummy) { - int i; + int nid; + for_each_online_node(nid) + build_zonelists(NODE_DATA(nid)); + return 0; +} + +void __meminit build_all_zonelists(void) +{ + if (system_state == SYSTEM_BOOTING) { + __build_all_zonelists(0); + cpuset_init_current_mems_allowed(); + } else { + /* we have to stop all cpus to guaranntee there is no user + of zonelist */ + stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); + /* cpuset refresh routine should be here */ + } - for_each_online_node(i) - build_zonelists(NODE_DATA(i)); printk("Built %i zonelists\n", num_online_nodes()); - cpuset_init_current_mems_allowed(); + } /* -- cgit v1.2.3-18-g5258 From 67de648211fa041fe08a0c25241a4980bbb90698 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 23 Jun 2006 02:03:12 -0700 Subject: [PATCH] squash duplicate page_to_pfn and pfn_to_page We have architectures where the size of page_to_pfn and pfn_to_page are significant enough to overall image size that they wish to push them out of line. However, in the process we have grown a second copy of the implementation of each of these routines for each memory model. Share the implmentation exposing it either inline or out-of-line as required. Signed-off-by: Andy Whitcroft Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 32 ++------------------------------ 1 file changed, 2 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9dfbe6b7d1c..5af33186a25 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2879,42 +2879,14 @@ void *__init alloc_large_system_hash(const char *tablename, } #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE -/* - * pfn <-> page translation. out-of-line version. 
- * (see asm-generic/memory_model.h) - */ -#if defined(CONFIG_FLATMEM) -struct page *pfn_to_page(unsigned long pfn) -{ - return mem_map + (pfn - ARCH_PFN_OFFSET); -} -unsigned long page_to_pfn(struct page *page) -{ - return (page - mem_map) + ARCH_PFN_OFFSET; -} -#elif defined(CONFIG_DISCONTIGMEM) struct page *pfn_to_page(unsigned long pfn) { - int nid = arch_pfn_to_nid(pfn); - return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid); + return __pfn_to_page(pfn); } unsigned long page_to_pfn(struct page *page) { - struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); - return (page - pgdat->node_mem_map) + pgdat->node_start_pfn; -} -#elif defined(CONFIG_SPARSEMEM) -struct page *pfn_to_page(unsigned long pfn) -{ - return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn; -} - -unsigned long page_to_pfn(struct page *page) -{ - long section_id = page_to_section(page); - return page - __section_mem_map_addr(__nr_to_section(section_id)); + return __page_to_pfn(page); } -#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */ EXPORT_SYMBOL(pfn_to_page); EXPORT_SYMBOL(page_to_pfn); #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ -- cgit v1.2.3-18-g5258 From fadd8fbd153c12963f8fe3c9ef7f8967f286f98b Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Fri, 23 Jun 2006 02:03:13 -0700 Subject: [PATCH] support for panic at OOM This patch adds panic_on_oom sysctl under sys.vm. When sysctl vm.panic_on_oom = 1, the kernel panics intead of killing rogue processes. And if vm.panic_on_oom is 0 the kernel will do oom_kill() in the same way as it does today. Of course, the default value is 0 and only root can modifies it. In general, oom_killer works well and kill rogue processes. So the whole system can survive. But there are environments where panic is preferable rather than kill some processes. Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 042e6436c3e..f9bb3cf3238 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -22,6 +22,7 @@ #include #include +int sysctl_panic_on_oom; /* #define DEBUG */ /** @@ -344,6 +345,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) break; case CONSTRAINT_NONE: + if (sysctl_panic_on_oom) + panic("out of memory. panic_on_oom is selected\n"); retry: /* * Rambo mode: Shoot down a process and hope it solves whatever -- cgit v1.2.3-18-g5258 From 6937a25cff818d32d0f9ff58a518c9ab96760aeb Mon Sep 17 00:00:00 2001 From: Dave Peterson Date: Fri, 23 Jun 2006 02:03:13 -0700 Subject: [PATCH] mm: fix typos in comments in mm/oom_kill.c This fixes a few typos in the comments in mm/oom_kill.c. Signed-off-by: David S. 
Peterson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index f9bb3cf3238..d46ed0f1dc0 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -26,7 +26,7 @@ int sysctl_panic_on_oom; /* #define DEBUG */ /** - * oom_badness - calculate a numeric value for how bad this task has been + * badness - calculate a numeric value for how bad this task has been * @p: task struct of which task we should calculate * @uptime: current uptime in seconds * @@ -201,7 +201,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) continue; /* - * This is in the process of releasing memory so for wait it + * This is in the process of releasing memory so wait for it * to finish before killing some other task by mistake. */ releasing = test_tsk_thread_flag(p, TIF_MEMDIE) || @@ -307,7 +307,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points, } /** - * oom_kill - kill the "best" process when we run out of memory + * out_of_memory - kill the "best" process when we run out of memory * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) -- cgit v1.2.3-18-g5258 From a43a8c39bbb493c9e93f6764b350de2e33e18e92 Mon Sep 17 00:00:00 2001 From: "Chen, Kenneth W" Date: Fri, 23 Jun 2006 02:03:15 -0700 Subject: [PATCH] tightening hugetlb strict accounting Current hugetlb strict accounting for shared mapping always assume mapping starts at zero file offset and reserves pages between zero and size of the file. This assumption often reserves (or lock down) a lot more pages then necessary if application maps at none zero file offset. libhugetlbfs is one example that requires proper reservation on shared mapping starts at none zero offset. This patch extends the reservation and hugetlb strict accounting to support any arbitrary pair of (offset, len), resulting a much more robust and accurate scheme. More importantly, it won't lock down any hugetlb pages outside file mapping. Signed-off-by: Ken Chen Acked-by: Adam Litke Cc: David Gibson Cc: William Lee Irwin III Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 282 ++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 162 insertions(+), 120 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 832f676ca03..df499973255 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -22,7 +22,7 @@ #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; -static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages; +static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; unsigned long max_huge_pages; static struct list_head hugepage_freelists[MAX_NUMNODES]; static unsigned int nr_huge_pages_node[MAX_NUMNODES]; @@ -123,39 +123,13 @@ static int alloc_fresh_huge_page(void) static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; struct page *page; - int use_reserve = 0; - unsigned long idx; spin_lock(&hugetlb_lock); - - if (vma->vm_flags & VM_MAYSHARE) { - - /* idx = radix tree index, i.e. offset into file in - * HPAGE_SIZE units */ - idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) - + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); - - /* The hugetlbfs specific inode info stores the number - * of "guaranteed available" (huge) pages. 
That is, - * the first 'prereserved_hpages' pages of the inode - * are either already instantiated, or have been - * pre-reserved (by hugetlb_reserve_for_inode()). Here - * we're in the process of instantiating the page, so - * we use this to determine whether to draw from the - * pre-reserved pool or the truly free pool. */ - if (idx < HUGETLBFS_I(inode)->prereserved_hpages) - use_reserve = 1; - } - - if (!use_reserve) { - if (free_huge_pages <= reserved_huge_pages) - goto fail; - } else { - BUG_ON(reserved_huge_pages == 0); - reserved_huge_pages--; - } + if (vma->vm_flags & VM_MAYSHARE) + resv_huge_pages--; + else if (free_huge_pages <= resv_huge_pages) + goto fail; page = dequeue_huge_page(vma, addr); if (!page) @@ -165,96 +139,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_refcounted(page); return page; - fail: - WARN_ON(use_reserve); /* reserved allocations shouldn't fail */ +fail: spin_unlock(&hugetlb_lock); return NULL; } -/* hugetlb_extend_reservation() - * - * Ensure that at least 'atleast' hugepages are, and will remain, - * available to instantiate the first 'atleast' pages of the given - * inode. If the inode doesn't already have this many pages reserved - * or instantiated, set aside some hugepages in the reserved pool to - * satisfy later faults (or fail now if there aren't enough, rather - * than getting the SIGBUS later). - */ -int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info, - unsigned long atleast) -{ - struct inode *inode = &info->vfs_inode; - unsigned long change_in_reserve = 0; - int ret = 0; - - spin_lock(&hugetlb_lock); - read_lock_irq(&inode->i_mapping->tree_lock); - - if (info->prereserved_hpages >= atleast) - goto out; - - /* Because we always call this on shared mappings, none of the - * pages beyond info->prereserved_hpages can have been - * instantiated, so we need to reserve all of them now. */ - change_in_reserve = atleast - info->prereserved_hpages; - - if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) { - ret = -ENOMEM; - goto out; - } - - reserved_huge_pages += change_in_reserve; - info->prereserved_hpages = atleast; - - out: - read_unlock_irq(&inode->i_mapping->tree_lock); - spin_unlock(&hugetlb_lock); - - return ret; -} - -/* hugetlb_truncate_reservation() - * - * This returns pages reserved for the given inode to the general free - * hugepage pool. If the inode has any pages prereserved, but not - * instantiated, beyond offset (atmost << HPAGE_SIZE), then release - * them. - */ -void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info, - unsigned long atmost) -{ - struct inode *inode = &info->vfs_inode; - struct address_space *mapping = inode->i_mapping; - unsigned long idx; - unsigned long change_in_reserve = 0; - struct page *page; - - spin_lock(&hugetlb_lock); - read_lock_irq(&inode->i_mapping->tree_lock); - - if (info->prereserved_hpages <= atmost) - goto out; - - /* Count pages which were reserved, but not instantiated, and - * which we can now release. 
*/ - for (idx = atmost; idx < info->prereserved_hpages; idx++) { - page = radix_tree_lookup(&mapping->page_tree, idx); - if (!page) - /* Pages which are already instantiated can't - * be unreserved (and in fact have already - * been removed from the reserved pool) */ - change_in_reserve++; - } - - BUG_ON(reserved_huge_pages < change_in_reserve); - reserved_huge_pages -= change_in_reserve; - info->prereserved_hpages = atmost; - - out: - read_unlock_irq(&inode->i_mapping->tree_lock); - spin_unlock(&hugetlb_lock); -} - static int __init hugetlb_init(void) { unsigned long i; @@ -334,7 +223,7 @@ static unsigned long set_max_huge_pages(unsigned long count) return nr_huge_pages; spin_lock(&hugetlb_lock); - count = max(count, reserved_huge_pages); + count = max(count, resv_huge_pages); try_to_free_low(count); while (count < nr_huge_pages) { struct page *page = dequeue_huge_page(NULL, 0); @@ -361,11 +250,11 @@ int hugetlb_report_meminfo(char *buf) return sprintf(buf, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" - "HugePages_Rsvd: %5lu\n" + "HugePages_Rsvd: %5lu\n" "Hugepagesize: %5lu kB\n", nr_huge_pages, free_huge_pages, - reserved_huge_pages, + resv_huge_pages, HPAGE_SIZE/1024); } @@ -754,3 +643,156 @@ void hugetlb_change_protection(struct vm_area_struct *vma, flush_tlb_range(vma, start, end); } +struct file_region { + struct list_head link; + long from; + long to; +}; + +static long region_add(struct list_head *head, long f, long t) +{ + struct file_region *rg, *nrg, *trg; + + /* Locate the region we are either in or before. */ + list_for_each_entry(rg, head, link) + if (f <= rg->to) + break; + + /* Round our left edge to the current segment if it encloses us. */ + if (f > rg->from) + f = rg->from; + + /* Check for and consume any regions we now overlap with. */ + nrg = rg; + list_for_each_entry_safe(rg, trg, rg->link.prev, link) { + if (&rg->link == head) + break; + if (rg->from > t) + break; + + /* If this area reaches higher then extend our area to + * include it completely. If this is not the first area + * which we intend to reuse, free it. */ + if (rg->to > t) + t = rg->to; + if (rg != nrg) { + list_del(&rg->link); + kfree(rg); + } + } + nrg->from = f; + nrg->to = t; + return 0; +} + +static long region_chg(struct list_head *head, long f, long t) +{ + struct file_region *rg, *nrg; + long chg = 0; + + /* Locate the region we are before or in. */ + list_for_each_entry(rg, head, link) + if (f <= rg->to) + break; + + /* If we are below the current region then a new region is required. + * Subtle, allocate a new region at the position but make it zero + * size such that we can guarentee to record the reservation. */ + if (&rg->link == head || t < rg->from) { + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); + if (nrg == 0) + return -ENOMEM; + nrg->from = f; + nrg->to = f; + INIT_LIST_HEAD(&nrg->link); + list_add(&nrg->link, rg->link.prev); + + return t - f; + } + + /* Round our left edge to the current segment if it encloses us. */ + if (f > rg->from) + f = rg->from; + chg = t - f; + + /* Check for and consume any regions we now overlap with. */ + list_for_each_entry(rg, rg->link.prev, link) { + if (&rg->link == head) + break; + if (rg->from > t) + return chg; + + /* We overlap with this area, if it extends futher than + * us then we must extend ourselves. Account for its + * existing reservation. 
*/ + if (rg->to > t) { + chg += rg->to - t; + t = rg->to; + } + chg -= rg->to - rg->from; + } + return chg; +} + +static long region_truncate(struct list_head *head, long end) +{ + struct file_region *rg, *trg; + long chg = 0; + + /* Locate the region we are either in or before. */ + list_for_each_entry(rg, head, link) + if (end <= rg->to) + break; + if (&rg->link == head) + return 0; + + /* If we are in the middle of a region then adjust it. */ + if (end > rg->from) { + chg = rg->to - end; + rg->to = end; + rg = list_entry(rg->link.next, typeof(*rg), link); + } + + /* Drop any remaining regions. */ + list_for_each_entry_safe(rg, trg, rg->link.prev, link) { + if (&rg->link == head) + break; + chg += rg->to - rg->from; + list_del(&rg->link); + kfree(rg); + } + return chg; +} + +static int hugetlb_acct_memory(long delta) +{ + int ret = -ENOMEM; + + spin_lock(&hugetlb_lock); + if ((delta + resv_huge_pages) <= free_huge_pages) { + resv_huge_pages += delta; + ret = 0; + } + spin_unlock(&hugetlb_lock); + return ret; +} + +int hugetlb_reserve_pages(struct inode *inode, long from, long to) +{ + long ret, chg; + + chg = region_chg(&inode->i_mapping->private_list, from, to); + if (chg < 0) + return chg; + ret = hugetlb_acct_memory(chg); + if (ret < 0) + return ret; + region_add(&inode->i_mapping->private_list, from, to); + return 0; +} + +void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) +{ + long chg = region_truncate(&inode->i_mapping->private_list, offset); + hugetlb_acct_memory(freed - chg); +} -- cgit v1.2.3-18-g5258 From e1b6aa6f1404f162697650df2cdb6c374b1d6a5b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 23 Jun 2006 02:03:17 -0700 Subject: [PATCH] slab: clean up kmem_getpages The last ifdef addition hit the ugliness treshold on this functions, so: - rename the variable i to nr_pages so it's somewhat descriptive - remove the addr variable and do the page_address call at the very end - instead of ifdef'ing the whole alloc_pages_node call just make the __GFP_COMP addition to flags conditional - rewrite the __GFP_COMP comment to make sense Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index a94cf0fea8a..7bd19639efd 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1489,31 +1489,29 @@ __initcall(cpucache_init); static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct page *page; - void *addr; + int nr_pages; int i; - flags |= cachep->gfpflags; #ifndef CONFIG_MMU - /* nommu uses slab's for process anonymous memory allocations, so - * requires __GFP_COMP to properly refcount higher order allocations" + /* + * Nommu uses slab's for process anonymous memory allocations, and thus + * requires __GFP_COMP to properly refcount higher order allocations */ - page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder); -#else - page = alloc_pages_node(nodeid, flags, cachep->gfporder); + flags |= __GFP_COMP; #endif + flags |= cachep->gfpflags; + + page = alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) return NULL; - addr = page_address(page); - i = (1 << cachep->gfporder); + nr_pages = (1 << cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_add(i, &slab_reclaim_pages); - add_page_state(nr_slab, i); - while (i--) { - __SetPageSlab(page); - page++; - } - return addr; + atomic_add(nr_pages, 
&slab_reclaim_pages); + add_page_state(nr_slab, nr_pages); + for (i = 0; i < nr_pages; i++) + __SetPageSlab(page + i); + return page_address(page); } /* -- cgit v1.2.3-18-g5258 From 7a7c381d25067b9a2bfe025dfcb16459daec0373 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 23 Jun 2006 02:03:17 -0700 Subject: [PATCH] slab: stop using list_for_each Use the _entry variant everywhere to clean the code up a tiny bit. Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 7bd19639efd..54bd2eb3f2f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1950,8 +1950,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, void (*dtor)(void*, struct kmem_cache *, unsigned long)) { size_t left_over, slab_size, ralign; - struct kmem_cache *cachep = NULL; - struct list_head *p; + struct kmem_cache *cachep = NULL, *pc; /* * Sanity checks... these are all serious usage bugs. @@ -1971,8 +1970,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, mutex_lock(&cache_chain_mutex); - list_for_each(p, &cache_chain) { - struct kmem_cache *pc = list_entry(p, struct kmem_cache, next); + list_for_each_entry(pc, &cache_chain, next) { mm_segment_t old_fs = get_fs(); char tmp; int res; @@ -3690,7 +3688,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, */ static void cache_reap(void *unused) { - struct list_head *walk; + struct kmem_cache *searchp; struct kmem_list3 *l3; int node = numa_node_id(); @@ -3701,13 +3699,11 @@ static void cache_reap(void *unused) return; } - list_for_each(walk, &cache_chain) { - struct kmem_cache *searchp; + list_for_each_entry(searchp, &cache_chain, next) { struct list_head *p; int tofree; struct slab *slabp; - searchp = list_entry(walk, struct kmem_cache, next); check_irq_on(); /* @@ -3835,7 +3831,6 @@ static void s_stop(struct seq_file *m, void *p) static int s_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = p; - struct list_head *q; struct slab *slabp; unsigned long active_objs; unsigned long num_objs; @@ -3856,15 +3851,13 @@ static int s_show(struct seq_file *m, void *p) check_irq_on(); spin_lock_irq(&l3->list_lock); - list_for_each(q, &l3->slabs_full) { - slabp = list_entry(q, struct slab, list); + list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) error = "slabs_full accounting error"; active_objs += cachep->num; active_slabs++; } - list_for_each(q, &l3->slabs_partial) { - slabp = list_entry(q, struct slab, list); + list_for_each_entry(slabp, &l3->slabs_partial, list) { if (slabp->inuse == cachep->num && !error) error = "slabs_partial inuse accounting error"; if (!slabp->inuse && !error) @@ -3872,8 +3865,7 @@ static int s_show(struct seq_file *m, void *p) active_objs += slabp->inuse; active_slabs++; } - list_for_each(q, &l3->slabs_free) { - slabp = list_entry(q, struct slab, list); + list_for_each_entry(slabp, &l3->slabs_free, list) { if (slabp->inuse && !error) error = "slabs_free/inuse accounting error"; num_slabs++; @@ -3966,7 +3958,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer, { char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; int limit, batchcount, shared, res; - struct list_head *p; + struct kmem_cache *cachep; if (count > MAX_SLABINFO_WRITE) return -EINVAL; @@ -3985,10 +3977,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer, /* 
Find the cache in the chain of caches. */ mutex_lock(&cache_chain_mutex); res = -EINVAL; - list_for_each(p, &cache_chain) { - struct kmem_cache *cachep; - - cachep = list_entry(p, struct kmem_cache, next); + list_for_each_entry(cachep, &cache_chain, next) { if (!strcmp(cachep->name, kbuf)) { if (limit < 1 || batchcount < 1 || batchcount > limit || shared < 0) { @@ -4090,7 +4079,6 @@ static void show_symbol(struct seq_file *m, unsigned long address) static int leaks_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = p; - struct list_head *q; struct slab *slabp; struct kmem_list3 *l3; const char *name; @@ -4115,14 +4103,10 @@ static int leaks_show(struct seq_file *m, void *p) check_irq_on(); spin_lock_irq(&l3->list_lock); - list_for_each(q, &l3->slabs_full) { - slabp = list_entry(q, struct slab, list); + list_for_each_entry(slabp, &l3->slabs_full, list) handle_slab(n, cachep, slabp); - } - list_for_each(q, &l3->slabs_partial) { - slabp = list_entry(q, struct slab, list); + list_for_each_entry(slabp, &l3->slabs_partial, list) handle_slab(n, cachep, slabp); - } spin_unlock_irq(&l3->list_lock); } name = cachep->name; -- cgit v1.2.3-18-g5258 From d6277db4ab271862ed599da08d78961c70f00002 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 23 Jun 2006 02:03:18 -0700 Subject: [PATCH] swsusp: rework memory shrinker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework the swsusp's memory shrinker in the following way: - Simplify balance_pgdat() by removing all of the swsusp-related code from it. - Make shrink_all_memory() use shrink_slab() and a new function shrink_all_zones() which calls shrink_active_list() and shrink_inactive_list() directly for each zone in a way that's optimized for suspend. In shrink_all_memory() we try to free exactly as many pages as the caller asks for, preferably in one shot, starting from easier targets.  If slab caches are huge, they are most likely to have enough pages to reclaim.  The inactive lists are next (the zones with more inactive pages go first) etc. Each time shrink_all_memory() attempts to shrink the active and inactive lists for each zone in 5 passes.  In the first pass, only the inactive lists are taken into consideration.  In the next two passes the active lists are also shrunk, but mapped pages are not reclaimed.  In the last two passes the active and inactive lists are shrunk and mapped pages are reclaimed as well. The aim of this is to alter the reclaim logic to choose the best pages to keep on resume and improve the responsiveness of the resumed system. Signed-off-by: Rafael J. Wysocki Signed-off-by: Con Kolivas Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 219 +++++++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 164 insertions(+), 55 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 440a733fe2e..46be8a02280 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -61,6 +61,8 @@ struct scan_control { * In this context, it doesn't matter that we scan the * whole list at once. */ int swap_cluster_max; + + int swappiness; }; /* @@ -741,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * A 100% value of vm_swappiness overrides this algorithm * altogether. 
*/ - swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; + swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; /* * Now use this metric to decide whether to start moving mapped @@ -957,6 +959,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) .may_writepage = !laptop_mode, .swap_cluster_max = SWAP_CLUSTER_MAX, .may_swap = 1, + .swappiness = vm_swappiness, }; inc_page_state(allocstall); @@ -1021,10 +1024,6 @@ out: * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at pages_high. * - * If `nr_pages' is non-zero then it is the number of pages which are to be - * reclaimed, regardless of the zone occupancies. This is a software suspend - * special. - * * Returns the number of pages which were actually freed. * * There is special handling here for zones which are full of pinned pages. @@ -1042,10 +1041,8 @@ out: * the page allocator fallback scheme to ensure that aging of pages is balanced * across the zones. */ -static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, - int order) +static unsigned long balance_pgdat(pg_data_t *pgdat, int order) { - unsigned long to_free = nr_pages; int all_zones_ok; int priority; int i; @@ -1055,7 +1052,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_swap = 1, - .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX, + .swap_cluster_max = SWAP_CLUSTER_MAX, + .swappiness = vm_swappiness, }; loop_again: @@ -1082,31 +1080,26 @@ loop_again: all_zones_ok = 1; - if (nr_pages == 0) { - /* - * Scan in the highmem->dma direction for the highest - * zone which needs scanning - */ - for (i = pgdat->nr_zones - 1; i >= 0; i--) { - struct zone *zone = pgdat->node_zones + i; + /* + * Scan in the highmem->dma direction for the highest + * zone which needs scanning + */ + for (i = pgdat->nr_zones - 1; i >= 0; i--) { + struct zone *zone = pgdat->node_zones + i; - if (!populated_zone(zone)) - continue; + if (!populated_zone(zone)) + continue; - if (zone->all_unreclaimable && - priority != DEF_PRIORITY) - continue; + if (zone->all_unreclaimable && priority != DEF_PRIORITY) + continue; - if (!zone_watermark_ok(zone, order, - zone->pages_high, 0, 0)) { - end_zone = i; - goto scan; - } + if (!zone_watermark_ok(zone, order, zone->pages_high, + 0, 0)) { + end_zone = i; + goto scan; } - goto out; - } else { - end_zone = pgdat->nr_zones - 1; } + goto out; scan: for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; @@ -1133,11 +1126,9 @@ scan: if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; - if (nr_pages == 0) { /* Not software suspend */ - if (!zone_watermark_ok(zone, order, - zone->pages_high, end_zone, 0)) - all_zones_ok = 0; - } + if (!zone_watermark_ok(zone, order, zone->pages_high, + end_zone, 0)) + all_zones_ok = 0; zone->temp_priority = priority; if (zone->prev_priority > priority) zone->prev_priority = priority; @@ -1162,8 +1153,6 @@ scan: total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } - if (nr_pages && to_free > nr_reclaimed) - continue; /* swsusp: need to do more work */ if (all_zones_ok) break; /* kswapd: all done */ /* @@ -1179,7 +1168,7 @@ scan: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. 
*/ - if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) + if (nr_reclaimed >= SWAP_CLUSTER_MAX) break; } out: @@ -1261,7 +1250,7 @@ static int kswapd(void *p) } finish_wait(&pgdat->kswapd_wait, &wait); - balance_pgdat(pgdat, 0, order); + balance_pgdat(pgdat, order); } return 0; } @@ -1290,35 +1279,154 @@ void wakeup_kswapd(struct zone *zone, int order) #ifdef CONFIG_PM /* - * Try to free `nr_pages' of memory, system-wide. Returns the number of freed - * pages. + * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages + * from LRU lists system-wide, for given pass and priority, and returns the + * number of reclaimed pages + * + * For pass > 3 we also try to shrink the LRU lists that contain a few pages + */ +static unsigned long shrink_all_zones(unsigned long nr_pages, int pass, + int prio, struct scan_control *sc) +{ + struct zone *zone; + unsigned long nr_to_scan, ret = 0; + + for_each_zone(zone) { + + if (!populated_zone(zone)) + continue; + + if (zone->all_unreclaimable && prio != DEF_PRIORITY) + continue; + + /* For pass = 0 we don't shrink the active list */ + if (pass > 0) { + zone->nr_scan_active += (zone