Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c | 450
1 file changed, 317 insertions, 133 deletions
diff --git a/mm/truncate.c b/mm/truncate.c index ba887bff48c..eda24730716 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -12,20 +12,61 @@  #include <linux/gfp.h>  #include <linux/mm.h>  #include <linux/swap.h> -#include <linux/module.h> +#include <linux/export.h>  #include <linux/pagemap.h>  #include <linux/highmem.h>  #include <linux/pagevec.h>  #include <linux/task_io_accounting_ops.h>  #include <linux/buffer_head.h>	/* grr. try_to_release_page,  				   do_invalidatepage */ +#include <linux/cleancache.h>  #include "internal.h" +static void clear_exceptional_entry(struct address_space *mapping, +				    pgoff_t index, void *entry) +{ +	struct radix_tree_node *node; +	void **slot; + +	/* Handled by shmem itself */ +	if (shmem_mapping(mapping)) +		return; + +	spin_lock_irq(&mapping->tree_lock); +	/* +	 * Regular page slots are stabilized by the page lock even +	 * without the tree itself locked.  These unlocked entries +	 * need verification under the tree lock. +	 */ +	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) +		goto unlock; +	if (*slot != entry) +		goto unlock; +	radix_tree_replace_slot(slot, NULL); +	mapping->nrshadows--; +	if (!node) +		goto unlock; +	workingset_node_shadows_dec(node); +	/* +	 * Don't track node without shadow entries. +	 * +	 * Avoid acquiring the list_lru lock if already untracked. +	 * The list_empty() test is safe as node->private_list is +	 * protected by mapping->tree_lock. +	 */ +	if (!workingset_node_shadows(node) && +	    !list_empty(&node->private_list)) +		list_lru_del(&workingset_shadow_nodes, &node->private_list); +	__radix_tree_delete_node(&mapping->page_tree, node); +unlock: +	spin_unlock_irq(&mapping->tree_lock); +}  /**   * do_invalidatepage - invalidate part or all of a page   * @page: the page which is affected - * @offset: the index of the truncation point + * @offset: start of the range to invalidate + * @length: length of the range to invalidate   *   * do_invalidatepage() is called when all or part of the page has become   * invalidated by a truncate operation. @@ -36,23 +77,18 @@   * point.  Because the caller is about to free (and possibly reuse) those   * blocks on-disk.   
*/ -void do_invalidatepage(struct page *page, unsigned long offset) +void do_invalidatepage(struct page *page, unsigned int offset, +		       unsigned int length)  { -	void (*invalidatepage)(struct page *, unsigned long); +	void (*invalidatepage)(struct page *, unsigned int, unsigned int); +  	invalidatepage = page->mapping->a_ops->invalidatepage;  #ifdef CONFIG_BLOCK  	if (!invalidatepage)  		invalidatepage = block_invalidatepage;  #endif  	if (invalidatepage) -		(*invalidatepage)(page, offset); -} - -static inline void truncate_partial_page(struct page *page, unsigned partial) -{ -	zero_user_segment(page, partial, PAGE_CACHE_SIZE); -	if (page_has_private(page)) -		do_invalidatepage(page, partial); +		(*invalidatepage)(page, offset, length);  }  /* @@ -101,14 +137,12 @@ truncate_complete_page(struct address_space *mapping, struct page *page)  		return -EIO;  	if (page_has_private(page)) -		do_invalidatepage(page, 0); +		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);  	cancel_dirty_page(page, PAGE_CACHE_SIZE); -	clear_page_mlock(page); -	remove_from_page_cache(page);  	ClearPageMappedToDisk(page); -	page_cache_release(page);	/* pagecache ref */ +	delete_from_page_cache(page);  	return 0;  } @@ -131,7 +165,6 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)  	if (page_has_private(page) && !try_to_release_page(page, 0))  		return 0; -	clear_page_mlock(page);  	ret = remove_mapping(mapping, page);  	return ret; @@ -183,14 +216,14 @@ int invalidate_inode_page(struct page *page)  }  /** - * truncate_inode_pages - truncate range of pages specified by start & end byte offsets + * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets   * @mapping: mapping to truncate   * @lstart: offset from which to truncate - * @lend: offset to which to truncate + * @lend: offset to which to truncate (inclusive)   *   * Truncate the page cache, removing the pages that are between - * specified offsets (and zeroing out partial page - * (if lstart is not page aligned)). + * specified offsets (and zeroing out partial pages + * if lstart or lend + 1 is not page aligned).   *   * Truncate takes two passes - the first pass is nonblocking.  It will not   * block on page locks and it will not block on writeback.  The second pass @@ -198,47 +231,73 @@ int invalidate_inode_page(struct page *page)   * The first pass will remove most pages, so the search cost of the second pass   * is low.   * - * When looking at page->index outside the page lock we need to be careful to - * copy it into a local to avoid races (it could change at any time). - *   * We pass down the cache-hot hint to the page freeing code.  Even if the   * mapping is large, it is probably the case that the final pages are the most   * recently touched, and freeing happens in ascending file offset order. + * + * Note that since ->invalidatepage() accepts range to invalidate + * truncate_inode_pages_range is able to handle cases where lend + 1 is not + * page aligned properly.   
*/  void truncate_inode_pages_range(struct address_space *mapping,  				loff_t lstart, loff_t lend)  { -	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; -	pgoff_t end; -	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); -	struct pagevec pvec; -	pgoff_t next; -	int i; - -	if (mapping->nrpages == 0) +	pgoff_t		start;		/* inclusive */ +	pgoff_t		end;		/* exclusive */ +	unsigned int	partial_start;	/* inclusive */ +	unsigned int	partial_end;	/* exclusive */ +	struct pagevec	pvec; +	pgoff_t		indices[PAGEVEC_SIZE]; +	pgoff_t		index; +	int		i; + +	cleancache_invalidate_inode(mapping); +	if (mapping->nrpages == 0 && mapping->nrshadows == 0)  		return; -	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); -	end = (lend >> PAGE_CACHE_SHIFT); +	/* Offsets within partial pages */ +	partial_start = lstart & (PAGE_CACHE_SIZE - 1); +	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); + +	/* +	 * 'start' and 'end' always covers the range of pages to be fully +	 * truncated. Partial pages are covered with 'partial_start' at the +	 * start of the range and 'partial_end' at the end of the range. +	 * Note that 'end' is exclusive while 'lend' is inclusive. +	 */ +	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; +	if (lend == -1) +		/* +		 * lend == -1 indicates end-of-file so we have to set 'end' +		 * to the highest possible pgoff_t and since the type is +		 * unsigned we're using -1. +		 */ +		end = -1; +	else +		end = (lend + 1) >> PAGE_CACHE_SHIFT;  	pagevec_init(&pvec, 0); -	next = start; -	while (next <= end && -	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { +	index = start; +	while (index < end && pagevec_lookup_entries(&pvec, mapping, index, +			min(end - index, (pgoff_t)PAGEVEC_SIZE), +			indices)) { +		mem_cgroup_uncharge_start();  		for (i = 0; i < pagevec_count(&pvec); i++) {  			struct page *page = pvec.pages[i]; -			pgoff_t page_index = page->index; -			if (page_index > end) { -				next = page_index; +			/* We rely upon deletion not changing page->index */ +			index = indices[i]; +			if (index >= end)  				break; + +			if (radix_tree_exceptional_entry(page)) { +				clear_exceptional_entry(mapping, index, page); +				continue;  			} -			if (page_index > next) -				next = page_index; -			next++;  			if (!trylock_page(page))  				continue; +			WARN_ON(page->index != index);  			if (PageWriteback(page)) {  				unlock_page(page);  				continue; @@ -246,30 +305,67 @@ void truncate_inode_pages_range(struct address_space *mapping,  			truncate_inode_page(mapping, page);  			unlock_page(page);  		} +		pagevec_remove_exceptionals(&pvec);  		pagevec_release(&pvec); +		mem_cgroup_uncharge_end();  		cond_resched(); +		index++;  	} -	if (partial) { +	if (partial_start) {  		struct page *page = find_lock_page(mapping, start - 1);  		if (page) { +			unsigned int top = PAGE_CACHE_SIZE; +			if (start > end) { +				/* Truncation within a single page */ +				top = partial_end; +				partial_end = 0; +			} +			wait_on_page_writeback(page); +			zero_user_segment(page, partial_start, top); +			cleancache_invalidate_page(mapping, page); +			if (page_has_private(page)) +				do_invalidatepage(page, partial_start, +						  top - partial_start); +			unlock_page(page); +			page_cache_release(page); +		} +	} +	if (partial_end) { +		struct page *page = find_lock_page(mapping, end); +		if (page) {  			wait_on_page_writeback(page); -			truncate_partial_page(page, partial); +			zero_user_segment(page, 0, partial_end); +			cleancache_invalidate_page(mapping, 
page); +			if (page_has_private(page)) +				do_invalidatepage(page, 0, +						  partial_end);  			unlock_page(page);  			page_cache_release(page);  		}  	} +	/* +	 * If the truncation happened within a single page no pages +	 * will be released, just zeroed, so we can bail out now. +	 */ +	if (start >= end) +		return; -	next = start; +	index = start;  	for ( ; ; ) {  		cond_resched(); -		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { -			if (next == start) +		if (!pagevec_lookup_entries(&pvec, mapping, index, +			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) { +			/* If all gone from start onwards, we're done */ +			if (index == start)  				break; -			next = start; +			/* Otherwise restart to make sure all gone */ +			index = start;  			continue;  		} -		if (pvec.pages[0]->index > end) { +		if (index == start && indices[0] >= end) { +			/* All gone out of hole to be punched, we're done */ +			pagevec_remove_exceptionals(&pvec);  			pagevec_release(&pvec);  			break;  		} @@ -277,19 +373,31 @@ void truncate_inode_pages_range(struct address_space *mapping,  		for (i = 0; i < pagevec_count(&pvec); i++) {  			struct page *page = pvec.pages[i]; -			if (page->index > end) +			/* We rely upon deletion not changing page->index */ +			index = indices[i]; +			if (index >= end) { +				/* Restart punch to make sure all gone */ +				index = start - 1;  				break; +			} + +			if (radix_tree_exceptional_entry(page)) { +				clear_exceptional_entry(mapping, index, page); +				continue; +			} +  			lock_page(page); +			WARN_ON(page->index != index);  			wait_on_page_writeback(page);  			truncate_inode_page(mapping, page); -			if (page->index > next) -				next = page->index; -			next++;  			unlock_page(page);  		} +		pagevec_remove_exceptionals(&pvec);  		pagevec_release(&pvec);  		mem_cgroup_uncharge_end(); +		index++;  	} +	cleancache_invalidate_inode(mapping);  }  EXPORT_SYMBOL(truncate_inode_pages_range); @@ -299,6 +407,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);   * @lstart: offset from which to truncate   *   * Called under (and serialised by) inode->i_mutex. + * + * Note: When this function returns, there can be a page in the process of + * deletion (inside __delete_from_page_cache()) in the specified range.  Thus + * mapping->nrpages can be non-zero when this function returns even after + * truncation of the whole mapping.   */  void truncate_inode_pages(struct address_space *mapping, loff_t lstart)  { @@ -307,6 +420,53 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)  EXPORT_SYMBOL(truncate_inode_pages);  /** + * truncate_inode_pages_final - truncate *all* pages before inode dies + * @mapping: mapping to truncate + * + * Called under (and serialized by) inode->i_mutex. + * + * Filesystems have to use this in the .evict_inode path to inform the + * VM that this is the final truncate and the inode is going away. + */ +void truncate_inode_pages_final(struct address_space *mapping) +{ +	unsigned long nrshadows; +	unsigned long nrpages; + +	/* +	 * Page reclaim can not participate in regular inode lifetime +	 * management (can't call iput()) and thus can race with the +	 * inode teardown.  Tell it when the address space is exiting, +	 * so that it does not install eviction information after the +	 * final truncate has begun. +	 */ +	mapping_set_exiting(mapping); + +	/* +	 * When reclaim installs eviction entries, it increases +	 * nrshadows first, then decreases nrpages.  Make sure we see +	 * this in the right order or we might miss an entry. 
+	 */ +	nrpages = mapping->nrpages; +	smp_rmb(); +	nrshadows = mapping->nrshadows; + +	if (nrpages || nrshadows) { +		/* +		 * As truncation uses a lockless tree lookup, cycle +		 * the tree lock to make sure any ongoing tree +		 * modification that does not see AS_EXITING is +		 * completed before starting the final truncate. +		 */ +		spin_lock_irq(&mapping->tree_lock); +		spin_unlock_irq(&mapping->tree_lock); + +		truncate_inode_pages(mapping, 0); +	} +} +EXPORT_SYMBOL(truncate_inode_pages_final); + +/**   * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode   * @mapping: the address_space which holds the pages to invalidate   * @start: the offset 'from' which to invalidate @@ -320,48 +480,53 @@ EXPORT_SYMBOL(truncate_inode_pages);   * pagetables.   */  unsigned long invalidate_mapping_pages(struct address_space *mapping, -				       pgoff_t start, pgoff_t end) +		pgoff_t start, pgoff_t end)  { +	pgoff_t indices[PAGEVEC_SIZE];  	struct pagevec pvec; -	pgoff_t next = start; -	unsigned long ret = 0; +	pgoff_t index = start; +	unsigned long ret; +	unsigned long count = 0;  	int i;  	pagevec_init(&pvec, 0); -	while (next <= end && -			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { +	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, +			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, +			indices)) {  		mem_cgroup_uncharge_start();  		for (i = 0; i < pagevec_count(&pvec); i++) {  			struct page *page = pvec.pages[i]; -			pgoff_t index; -			int lock_failed; -			lock_failed = !trylock_page(page); +			/* We rely upon deletion not changing page->index */ +			index = indices[i]; +			if (index > end) +				break; -			/* -			 * We really shouldn't be looking at the ->index of an -			 * unlocked page.  But we're not allowed to lock these -			 * pages.  So we rely upon nobody altering the ->index -			 * of this (pinned-by-us) page. -			 */ -			index = page->index; -			if (index > next) -				next = index; -			next++; -			if (lock_failed) +			if (radix_tree_exceptional_entry(page)) { +				clear_exceptional_entry(mapping, index, page);  				continue; +			} -			ret += invalidate_inode_page(page); - +			if (!trylock_page(page)) +				continue; +			WARN_ON(page->index != index); +			ret = invalidate_inode_page(page);  			unlock_page(page); -			if (next > end) -				break; +			/* +			 * Invalidation is a hint that the page is no longer +			 * of interest and try to speed up its reclaim. 
+			 */ +			if (!ret) +				deactivate_page(page); +			count += ret;  		} +		pagevec_remove_exceptionals(&pvec);  		pagevec_release(&pvec);  		mem_cgroup_uncharge_end();  		cond_resched(); +		index++;  	} -	return ret; +	return count;  }  EXPORT_SYMBOL(invalidate_mapping_pages); @@ -385,11 +550,14 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)  	if (PageDirty(page))  		goto failed; -	clear_page_mlock(page);  	BUG_ON(page_has_private(page)); -	__remove_from_page_cache(page); +	__delete_from_page_cache(page, NULL);  	spin_unlock_irq(&mapping->tree_lock);  	mem_cgroup_uncharge_cache_page(page); + +	if (mapping->a_ops->freepage) +		mapping->a_ops->freepage(page); +  	page_cache_release(page);	/* pagecache ref */  	return 1;  failed: @@ -420,37 +588,40 @@ static int do_launder_page(struct address_space *mapping, struct page *page)  int invalidate_inode_pages2_range(struct address_space *mapping,  				  pgoff_t start, pgoff_t end)  { +	pgoff_t indices[PAGEVEC_SIZE];  	struct pagevec pvec; -	pgoff_t next; +	pgoff_t index;  	int i;  	int ret = 0;  	int ret2 = 0;  	int did_range_unmap = 0; -	int wrapped = 0; +	cleancache_invalidate_inode(mapping);  	pagevec_init(&pvec, 0); -	next = start; -	while (next <= end && !wrapped && -		pagevec_lookup(&pvec, mapping, next, -			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { +	index = start; +	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, +			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, +			indices)) {  		mem_cgroup_uncharge_start();  		for (i = 0; i < pagevec_count(&pvec); i++) {  			struct page *page = pvec.pages[i]; -			pgoff_t page_index; + +			/* We rely upon deletion not changing page->index */ +			index = indices[i]; +			if (index > end) +				break; + +			if (radix_tree_exceptional_entry(page)) { +				clear_exceptional_entry(mapping, index, page); +				continue; +			}  			lock_page(page); +			WARN_ON(page->index != index);  			if (page->mapping != mapping) {  				unlock_page(page);  				continue;  			} -			page_index = page->index; -			next = page_index + 1; -			if (next == 0) -				wrapped = 1; -			if (page_index > end) { -				unlock_page(page); -				break; -			}  			wait_on_page_writeback(page);  			if (page_mapped(page)) {  				if (!did_range_unmap) { @@ -458,9 +629,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,  					 * Zap the rest of the file in one hit.  					 
*/  					unmap_mapping_range(mapping, -					   (loff_t)page_index<<PAGE_CACHE_SHIFT, -					   (loff_t)(end - page_index + 1) -							<< PAGE_CACHE_SHIFT, +					   (loff_t)index << PAGE_CACHE_SHIFT, +					   (loff_t)(1 + end - index) +							 << PAGE_CACHE_SHIFT,  					    0);  					did_range_unmap = 1;  				} else { @@ -468,8 +639,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,  					 * Just zap this page  					 */  					unmap_mapping_range(mapping, -					  (loff_t)page_index<<PAGE_CACHE_SHIFT, -					  PAGE_CACHE_SIZE, 0); +					   (loff_t)index << PAGE_CACHE_SHIFT, +					   PAGE_CACHE_SIZE, 0);  				}  			}  			BUG_ON(page_mapped(page)); @@ -482,10 +653,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,  				ret = ret2;  			unlock_page(page);  		} +		pagevec_remove_exceptionals(&pvec);  		pagevec_release(&pvec);  		mem_cgroup_uncharge_end();  		cond_resched(); +		index++;  	} +	cleancache_invalidate_inode(mapping);  	return ret;  }  EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); @@ -508,8 +682,7 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);  /**   * truncate_pagecache - unmap and remove pagecache that has been truncated   * @inode: inode - * @old: old file offset - * @new: new file offset + * @newsize: new file size   *   * inode's new i_size must already be written before truncate_pagecache   * is called. @@ -521,9 +694,10 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);   * situations such as writepage being called for a page that has already   * had its underlying blocks deallocated.   */ -void truncate_pagecache(struct inode *inode, loff_t old, loff_t new) +void truncate_pagecache(struct inode *inode, loff_t newsize)  {  	struct address_space *mapping = inode->i_mapping; +	loff_t holebegin = round_up(newsize, PAGE_SIZE);  	/*  	 * unmap_mapping_range is called twice, first simply for @@ -534,9 +708,9 @@ void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)  	 * truncate_inode_pages finishes, hence the second  	 * unmap_mapping_range call must be made for correctness.  	 */ -	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1); -	truncate_inode_pages(mapping, new); -	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1); +	unmap_mapping_range(mapping, holebegin, 0, 1); +	truncate_inode_pages(mapping, newsize); +	unmap_mapping_range(mapping, holebegin, 0, 1);  }  EXPORT_SYMBOL(truncate_pagecache); @@ -545,44 +719,54 @@ EXPORT_SYMBOL(truncate_pagecache);   * @inode: inode   * @newsize: new file size   * - * truncate_setsize updastes i_size update and performs pagecache - * truncation (if necessary) for a file size updates. It will be - * typically be called from the filesystem's setattr function when - * ATTR_SIZE is passed in. + * truncate_setsize updates i_size and performs pagecache truncation (if + * necessary) to @newsize. It will be typically be called from the filesystem's + * setattr function when ATTR_SIZE is passed in.   * - * Must be called with inode_mutex held and after all filesystem - * specific block truncation has been performed. + * Must be called with inode_mutex held and before all filesystem specific + * block truncation has been performed.   
*/  void truncate_setsize(struct inode *inode, loff_t newsize)  { -	loff_t oldsize; - -	oldsize = inode->i_size;  	i_size_write(inode, newsize); - -	truncate_pagecache(inode, oldsize, newsize); +	truncate_pagecache(inode, newsize);  }  EXPORT_SYMBOL(truncate_setsize);  /** - * vmtruncate - unmap mappings "freed" by truncate() syscall - * @inode: inode of the file used - * @offset: file offset to start truncating + * truncate_pagecache_range - unmap and remove pagecache that is hole-punched + * @inode: inode + * @lstart: offset of beginning of hole + * @lend: offset of last byte of hole   * - * This function is deprecated and truncate_setsize or truncate_pagecache - * should be used instead, together with filesystem specific block truncation. + * This function should typically be called before the filesystem + * releases resources associated with the freed range (eg. deallocates + * blocks). This way, pagecache will always stay logically coherent + * with on-disk format, and the filesystem would not have to deal with + * situations such as writepage being called for a page that has already + * had its underlying blocks deallocated.   */ -int vmtruncate(struct inode *inode, loff_t offset) +void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)  { -	int error; - -	error = inode_newsize_ok(inode, offset); -	if (error) -		return error; +	struct address_space *mapping = inode->i_mapping; +	loff_t unmap_start = round_up(lstart, PAGE_SIZE); +	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1; +	/* +	 * This rounding is currently just for example: unmap_mapping_range +	 * expands its hole outwards, whereas we want it to contract the hole +	 * inwards.  However, existing callers of truncate_pagecache_range are +	 * doing their own page rounding first.  Note that unmap_mapping_range +	 * allows holelen 0 for all, and we allow lend -1 for end of file. +	 */ -	truncate_setsize(inode, offset); -	if (inode->i_op->truncate) -		inode->i_op->truncate(inode); -	return 0; +	/* +	 * Unlike in truncate_pagecache, unmap_mapping_range is called only +	 * once (before truncating pagecache), and without "even_cows" flag: +	 * hole-punching should not remove private COWed pages from the hole. +	 */ +	if ((u64)unmap_end > (u64)unmap_start) +		unmap_mapping_range(mapping, unmap_start, +				    1 + unmap_end - unmap_start, 0); +	truncate_inode_pages_range(mapping, lstart, lend);  } -EXPORT_SYMBOL(vmtruncate); +EXPORT_SYMBOL(truncate_pagecache_range);  | 
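
The signature change to do_invalidatepage() and to the ->invalidatepage address_space operation (from a single unsigned long offset to an (offset, length) pair) is what allows truncate_inode_pages_range() to cope with an lend + 1 that is not page aligned. Below is a minimal sketch of what a filesystem callback looks like after this change, assuming the filesystem keeps nothing but buffer heads as private page state; the foofs_ names are placeholders and not part of this patch.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>   /* block_invalidatepage() */

/*
 * Hypothetical example: when buffer heads are the only private page
 * state, the callback can simply forward the sub-page range to
 * block_invalidatepage(), which also takes a length as of this series.
 */
static void foofs_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        block_invalidatepage(page, offset, length);
}

static const struct address_space_operations foofs_aops = {
        /* .readpage, .writepage, ... elided */
        .invalidatepage = foofs_invalidatepage,
};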
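The partial_start/partial_end bookkeeping in truncate_inode_pages_range() is easiest to check with concrete numbers. A worked example follows, assuming 4096-byte pages; the values are illustrative only.

/*
 * Worked example, assuming PAGE_CACHE_SIZE == 4096:
 *
 *   lstart = 1000, lend = 10239   (bytes 1000..10239 inclusive)
 *     partial_start = 1000  & 4095        = 1000
 *     partial_end   = 10240 & 4095        = 2048
 *     start         = (1000 + 4095) >> 12 = 1    (first whole page)
 *     end           = 10240 >> 12         = 2    (exclusive)
 *   => page 1 is removed entirely, page 0 is zeroed from byte 1000 to
 *      the end of the page, page 2 is zeroed over bytes 0..2047.
 *
 *   lstart = 1000, lend = 2047    (hole inside a single page)
 *     start = 1, end = 0, so start > end: only the partial_start branch
 *     runs, with top set to partial_end, zeroing bytes 1000..2047 of
 *     page 0, and the function returns before the page-removal loops.
 */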
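truncate_inode_pages_final() exists so that ->evict_inode can set AS_EXITING before the last truncate, closing the race with reclaim installing shadow entries into a dying mapping. A hedged sketch of the intended call site, assuming a filesystem with no other per-inode teardown work:

static void foofs_evict_inode(struct inode *inode)
{
        /*
         * Marks the mapping as exiting and, if any pages or shadow
         * entries remain, cycles the tree lock and truncates them all.
         */
        truncate_inode_pages_final(&inode->i_data);

        /* filesystem-specific teardown (block freeing etc.) would go here */

        clear_inode(inode);
}

static const struct super_operations foofs_sops = {
        .evict_inode = foofs_evict_inode,
        /* ... */
};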
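The reworded kernel-doc for truncate_setsize() now says it must run before the filesystem-specific block truncation, typically from ->setattr when ATTR_SIZE is set. A sketch of that ordering, where foofs_truncate_blocks() stands in for whatever the filesystem does to free its on-disk blocks:

/* Placeholder for the filesystem's own on-disk truncation. */
static void foofs_truncate_blocks(struct inode *inode, loff_t size);

static int foofs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                /* i_size update and pagecache truncation first ... */
                truncate_setsize(inode, attr->ia_size);
                /* ... then the filesystem-specific block truncation. */
                foofs_truncate_blocks(inode, attr->ia_size);
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}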
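truncate_pagecache_range() is the hole-punch counterpart of truncate_pagecache(): it unmaps only the fully covered pages, skips the even_cows pass so private COWed pages survive, and takes an inclusive lend. A sketch of how a FALLOC_FL_PUNCH_HOLE path might use it, per the new kernel-doc; foofs_free_blocks() is a placeholder:

/* Placeholder for the filesystem's on-disk block deallocation. */
static long foofs_free_blocks(struct inode *inode, loff_t start, loff_t end);

static long foofs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        loff_t first = offset;
        loff_t last = offset + len - 1;         /* lend is inclusive */

        /*
         * Drop (and unmap) the affected pagecache before the blocks are
         * deallocated, so writepage never runs against freed blocks.
         */
        truncate_pagecache_range(inode, first, last);

        /* now release the underlying blocks for the hole */
        return foofs_free_blocks(inode, first, last);
}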
