Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r--	fs/ntfs/aops.c	121
1 file changed, 58 insertions(+), 63 deletions(-)
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 580412d330c..d267ea6aa1a 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
@@ -86,17 +87,19 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
/* Check for the current buffer head overflowing. */
if (unlikely(file_ofs + bh->b_size > init_size)) {
- u8 *kaddr;
int ofs;
+ void *kaddr;
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
- kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ local_irq_save(flags);
+ kaddr = kmap_atomic(page);
memset(kaddr + bh_offset(bh) + ofs, 0,
bh->b_size - ofs);
- kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
flush_dcache_page(page);
+ kunmap_atomic(kaddr);
+ local_irq_restore(flags);
}
} else {
clear_buffer_uptodate(bh);
@@ -143,11 +146,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ local_irq_save(flags);
+ kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
- kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+ kunmap_atomic(kaddr);
+ local_irq_restore(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
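
With the switch to the stack-based kmap_atomic(), the KM_* slot argument disappears. The explicit local_irq_save()/local_irq_restore() pair keeps the interrupt exclusion that the dedicated KM_BIO_SRC_IRQ slot used to imply for this end_io path. A minimal sketch of the resulting pattern, assuming a page and buffer head already in scope (the helper name is illustrative, not from the patch):

	#include <linux/highmem.h>
	#include <linux/buffer_head.h>

	/* Illustrative helper: zero a buffer's tail under an atomic mapping. */
	static void ntfs_zero_buf_tail(struct page *page, struct buffer_head *bh,
			unsigned int ofs)
	{
		unsigned long flags;
		void *kaddr;

		local_irq_save(flags);		/* was implied by KM_BIO_SRC_IRQ */
		kaddr = kmap_atomic(page);	/* was kmap_atomic(page, KM_BIO_SRC_IRQ) */
		memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
		flush_dcache_page(page);
		kunmap_atomic(kaddr);		/* slot argument dropped here too */
		local_irq_restore(flags);
	}
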
@@ -241,8 +246,7 @@ static int ntfs_read_block(struct page *page)
rl = NULL;
nr = i = 0;
do {
- u8 *kaddr;
- int err;
+ int err = 0;
if (unlikely(buffer_uptodate(bh)))
continue;
@@ -250,11 +254,10 @@ static int ntfs_read_block(struct page *page)
arr[nr++] = bh;
continue;
}
- err = 0;
bh->b_bdev = vol->sb->s_bdev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
- BOOL is_retry = FALSE;
+ bool is_retry = false;
/* Convert iblock into corresponding vcn and offset. */
vcn = (VCN)iblock << blocksize_bits >>
@@ -292,7 +295,7 @@ lock_retry_remap:
goto handle_hole;
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
- is_retry = TRUE;
+ is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
@@ -336,10 +339,7 @@ handle_hole:
bh->b_blocknr = -1UL;
clear_buffer_mapped(bh);
handle_zblock:
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + i * blocksize, 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user(page, i * blocksize, blocksize);
if (likely(!err))
set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head);
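
zero_user(page, offset, length) replaces the open-coded map/memset/flush/unmap sequence here, and the same substitution recurs below in ntfs_readpage(), ntfs_write_block() and ntfs_writepage(). Roughly, the helper from <linux/highmem.h> expands to the pattern it replaces; a sketch (the _sketch name is illustrative):

	/* Approximate expansion of zero_user(); the real helper is built on
	 * zero_user_segments() in <linux/highmem.h>. */
	static inline void zero_user_sketch(struct page *page, unsigned int start,
			unsigned int size)
	{
		void *kaddr = kmap_atomic(page);

		memset(kaddr + start, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr);
	}
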
@@ -401,7 +401,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
- u8 *kaddr;
+ u8 *addr;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
unsigned long flags;
@@ -410,6 +410,15 @@ static int ntfs_readpage(struct file *file, struct page *page)
retry_readpage:
BUG_ON(!PageLocked(page));
+ vi = page->mapping->host;
+ i_size = i_size_read(vi);
+ /* Is the page fully outside i_size? (truncate in progress) */
+ if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT)) {
+ zero_user(page, 0, PAGE_CACHE_SIZE);
+ ntfs_debug("Read outside i_size - truncated?");
+ goto done;
+ }
/*
* This can potentially happen because we clear PageUptodate() during
* ntfs_writepage() of MstProtected() attributes.
@@ -418,7 +427,6 @@ retry_readpage:
unlock_page(page);
return 0;
}
- vi = page->mapping->host;
ni = NTFS_I(vi);
/*
* Only $DATA attributes can be encrypted and only unnamed $DATA
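
Hoisting the "fully outside i_size" test above the PageUptodate() check closes a race with a concurrent shrinking truncate: a page past end of file is zeroed and declared done instead of being read. The index arithmetic rounds i_size up to a whole page; a worked sketch assuming a 4096-byte PAGE_CACHE_SIZE:

	/* With PAGE_CACHE_SHIFT == 12 and i_size == 5000:
	 * end_index = (5000 + 4095) >> 12 = 2, so pages 0 and 1 hold data
	 * and any page->index >= 2 lies wholly beyond i_size. */
	pgoff_t end_index = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (page->index >= end_index)
		zero_user(page, 0, PAGE_CACHE_SIZE);
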
@@ -456,10 +464,7 @@ retry_readpage:
* ok to ignore the compressed flag here.
*/
if (unlikely(page->index > 0)) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
goto done;
}
if (!NInoAttr(ni))
@@ -499,15 +504,15 @@ retry_readpage:
/* Race with shrinking truncate. */
attr_len = i_size;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
/* Copy the data to the page. */
- memcpy(kaddr, (u8*)ctx->attr +
+ memcpy(addr, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
attr_len);
/* Zero the remainder of the page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr);
put_unm_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
@@ -558,7 +563,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
unsigned long flags;
unsigned int blocksize, vcn_ofs;
int err;
- BOOL need_end_writeback;
+ bool need_end_writeback;
unsigned char blocksize_bits;
vi = page->mapping->host;
@@ -626,7 +631,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
rl = NULL;
err = 0;
do {
- BOOL is_retry = FALSE;
+ bool is_retry = false;
if (unlikely(block >= dblock)) {
/*
@@ -741,14 +746,14 @@ lock_retry_remap:
unsigned long *bpos, *bend;
/* Check if the buffer is zero. */
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
bpos = (unsigned long *)(kaddr + bh_offset(bh));
bend = (unsigned long *)((u8*)bpos + blocksize);
do {
if (unlikely(*bpos))
break;
} while (likely(++bpos < bend));
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (bpos == bend) {
/*
* Buffer is zero and sparse, no need to write
@@ -768,7 +773,7 @@ lock_retry_remap:
}
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
- is_retry = TRUE;
+ is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
@@ -786,14 +791,9 @@ lock_retry_remap:
* uptodate so it can get discarded by the VM.
*/
if (err == -ENOENT || lcn == LCN_ENOENT) {
- u8 *kaddr;
-
bh->b_blocknr = -1;
clear_buffer_dirty(bh);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
err = 0;
continue;
@@ -874,12 +874,12 @@ lock_retry_remap:
set_page_writeback(page); /* Keeps try_to_free_buffers() away. */
/* Submit the prepared buffers for i/o. */
- need_end_writeback = TRUE;
+ need_end_writeback = true;
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(WRITE, bh);
- need_end_writeback = FALSE;
+ need_end_writeback = false;
}
bh = next;
} while (bh != head);
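
Throughout the file, the driver-private BOOL type and its TRUE/FALSE values give way to the kernel-wide C99 bool from <linux/types.h>; the two are assignment-compatible, so the conversion is mechanical:

	#include <linux/types.h>

	bool need_end_writeback = true;	/* was: BOOL need_end_writeback = TRUE; */
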
@@ -932,7 +932,7 @@ static int ntfs_write_mst_block(struct page *page,
runlist_element *rl;
int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
unsigned bh_size, rec_size_bits;
- BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+ bool sync, is_mft, page_is_dirty, rec_is_dirty;
unsigned char bh_size_bits;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -975,10 +975,10 @@ static int ntfs_write_mst_block(struct page *page,
rl = NULL;
err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
- page_is_dirty = rec_is_dirty = FALSE;
+ page_is_dirty = rec_is_dirty = false;
rec_start_bh = NULL;
do {
- BOOL is_retry = FALSE;
+ bool is_retry = false;
if (likely(block < rec_block)) {
if (unlikely(block >= dblock)) {
@@ -1009,10 +1009,10 @@ static int ntfs_write_mst_block(struct page *page,
}
if (!buffer_dirty(bh)) {
/* Clean records are not written out. */
- rec_is_dirty = FALSE;
+ rec_is_dirty = false;
continue;
}
- rec_is_dirty = TRUE;
+ rec_is_dirty = true;
rec_start_bh = bh;
}
/* Need to map the buffer if it is not mapped already. */
@@ -1053,7 +1053,7 @@ lock_retry_remap:
*/
if (!is_mft && !is_retry &&
lcn == LCN_RL_NOT_MAPPED) {
- is_retry = TRUE;
+ is_retry = true;
/*
* Attempt to map runlist, dropping
* lock for the duration.
@@ -1063,7 +1063,7 @@ lock_retry_remap:
if (likely(!err2))
goto lock_retry_remap;
if (err2 == -ENOMEM)
- page_is_dirty = TRUE;
+ page_is_dirty = true;
lcn = err2;
} else {
err2 = -EIO;
@@ -1145,7 +1145,7 @@ lock_retry_remap:
* means we need to redirty the page before
* returning.
*/
- page_is_dirty = TRUE;
+ page_is_dirty = true;
/*
* Remove the buffers in this mft record from
* the list of buffers to write.
@@ -1195,7 +1195,7 @@ lock_retry_remap:
tbh = bhs[i];
if (!tbh)
continue;
- if (unlikely(test_set_buffer_locked(tbh)))
+ if (!trylock_buffer(tbh))
BUG();
/* The buffer dirty state is now irrelevant, just clean it. */
clear_buffer_dirty(tbh);
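
test_set_buffer_locked() is gone from the buffer_head API; its successor trylock_buffer() reports success rather than the old lock bit, so the sense of the test inverts:

	/* Before: nonzero meant the buffer was already locked. */
	if (unlikely(test_set_buffer_locked(tbh)))
		BUG();

	/* After: nonzero means the lock was just acquired. */
	if (!trylock_buffer(tbh))
		BUG();
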
@@ -1356,7 +1356,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
loff_t i_size;
struct inode *vi = page->mapping->host;
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
- char *kaddr;
+ char *addr;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *m = NULL;
u32 attr_len;
@@ -1372,7 +1372,7 @@ retry_writepage:
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
- block_invalidatepage(page, 0);
+ block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
unlock_page(page);
ntfs_debug("Write outside i_size - truncated?");
return 0;
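
block_invalidatepage() now takes a length as well as an offset, so callers can invalidate a sub-range of the page; invalidating everything therefore spells out the full extent:

	/* Prototype as of the length-argument change: invalidate
	 * [offset, offset + length) within the page. */
	void block_invalidatepage(struct page *page, unsigned int offset,
			unsigned int length);

	/* Whole-page invalidation, as used in ntfs_writepage() above: */
	block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
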
@@ -1418,10 +1418,7 @@ retry_writepage:
if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
/* The page straddles i_size. */
unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
}
/* Handle mst protected attributes. */
if (NInoMstProtected(ni))
@@ -1498,14 +1495,14 @@ retry_writepage:
/* Shrinking cannot fail. */
BUG_ON(err);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
- kaddr, attr_len);
+ addr, attr_len);
/* Zero out of bounds area in the page cache page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
- kunmap_atomic(kaddr, KM_USER0);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ kunmap_atomic(addr);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
/* We are done with the page. */
@@ -1544,26 +1541,23 @@ err_out:
/**
* ntfs_aops - general address space operations for inodes and attributes
*/
-struct address_space_operations ntfs_aops = {
+const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
#endif /* NTFS_RW */
.migratepage = buffer_migrate_page, /* Move a page cache page from
one physical page to an
other. */
+ .error_remove_page = generic_error_remove_page,
};
/**
 * ntfs_mst_aops - general address space operations for mst protected inodes
* and attributes
*/
-struct address_space_operations ntfs_mst_aops = {
+const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
@@ -1573,6 +1567,7 @@ struct address_space_operations ntfs_mst_aops = {
.migratepage = buffer_migrate_page, /* Move a page cache page from
one physical page to an
other. */
+ .error_remove_page = generic_error_remove_page,
};
#ifdef NTFS_RW
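
The address_space_operations updates fall in three groups: the tables become const so they can live in read-only data; .sync_page disappears because request-queue unplugging moved from per-mapping callbacks to explicit on-stack plugging at the submitter; and .error_remove_page gives the memory-failure code a way to drop a clean page cache page when the backing memory is poisoned. A sketch of the plugging idiom that superseded ->sync_page, purely illustrative and not from this file:

	#include <linux/blkdev.h>

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* ... queue a batch of submit_bh() / readpage I/O here ... */
	blk_finish_plug(&plug);
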