Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r--	fs/ntfs/aops.c	151

1 file changed, 75 insertions(+), 76 deletions(-)
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 1c0a4315876..d267ea6aa1a 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
  * aops.c - NTFS kernel address space operations and page cache handling.
  * Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -22,6 +22,8 @@
  */
 
 #include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -85,17 +87,19 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		/* Check for the current buffer head overflowing. */
 		if (unlikely(file_ofs + bh->b_size > init_size)) {
-			u8 *kaddr;
 			int ofs;
+			void *kaddr;
 
 			ofs = 0;
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
-			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			local_irq_save(flags);
+			kaddr = kmap_atomic(page);
 			memset(kaddr + bh_offset(bh) + ofs, 0,
 					bh->b_size - ofs);
-			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
 			flush_dcache_page(page);
+			kunmap_atomic(kaddr);
+			local_irq_restore(flags);
 		}
 	} else {
 		clear_buffer_uptodate(bh);
@@ -142,11 +146,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		recs = PAGE_CACHE_SIZE / rec_size;
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
-		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		local_irq_save(flags);
+		kaddr = kmap_atomic(page);
 		for (i = 0; i < recs; i++)
 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 					i * rec_size), rec_size);
-		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+		kunmap_atomic(kaddr);
+		local_irq_restore(flags);
 		flush_dcache_page(page);
 		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
@@ -200,8 +206,8 @@ static int ntfs_read_block(struct page *page)
 	/* $MFT/$DATA must have its complete runlist in memory at all times. */
 	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
 
-	blocksize_bits = VFS_I(ni)->i_blkbits;
-	blocksize = 1 << blocksize_bits;
+	blocksize = vol->sb->s_blocksize;
+	blocksize_bits = vol->sb->s_blocksize_bits;
 
 	if (!page_has_buffers(page)) {
 		create_empty_buffers(page, blocksize, 0);
@@ -240,8 +246,7 @@ static int ntfs_read_block(struct page *page)
 	rl = NULL;
 	nr = i = 0;
 	do {
-		u8 *kaddr;
-		int err;
+		int err = 0;
 
 		if (unlikely(buffer_uptodate(bh)))
 			continue;
@@ -249,11 +254,10 @@ static int ntfs_read_block(struct page *page)
 			arr[nr++] = bh;
 			continue;
 		}
-		err = 0;
 		bh->b_bdev = vol->sb->s_bdev;
 		/* Is the block within the allowed limits? */
 		if (iblock < lblock) {
-			BOOL is_retry = FALSE;
+			bool is_retry = false;
 
 			/* Convert iblock into corresponding vcn and offset. */
 			vcn = (VCN)iblock << blocksize_bits >>
@@ -291,7 +295,7 @@ lock_retry_remap:
 				goto handle_hole;
 			/* If first try and runlist unmapped, map and retry. */
 			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-				is_retry = TRUE;
+				is_retry = true;
 				/*
 				 * Attempt to map runlist, dropping lock for
 				 * the duration.
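The bulk of the churn above is the kmap_atomic() conversion: the KM_BIO_SRC_IRQ and KM_USER0 slot arguments are gone, and the mapping slot is now managed implicitly. Because the old IRQ slot was what made the mapping safe against interrupt-context users, the buffer-head completion path now brackets the mapping with an explicit local_irq_save()/local_irq_restore() pair. A minimal sketch of the resulting pattern; zero_partial_block() is a hypothetical helper for illustration, not a function from aops.c:

#include <linux/highmem.h>
#include <linux/irqflags.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Post-conversion mapping pattern: no KM_* slot argument, interrupts
 * disabled explicitly where the old code relied on an IRQ-context slot.
 * Hypothetical helper, shown only to illustrate the new API shape.
 */
static void zero_partial_block(struct page *page, unsigned int ofs,
		unsigned int len)
{
	unsigned long flags;
	void *kaddr;

	local_irq_save(flags);		/* keep the mapping atomic w.r.t. local interrupts */
	kaddr = kmap_atomic(page);	/* slot chosen implicitly; must not sleep */
	memset(kaddr + ofs, 0, len);
	flush_dcache_page(page);	/* keep the data cache coherent on VIVT caches */
	kunmap_atomic(kaddr);
	local_irq_restore(flags);
}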
@@ -335,10 +339,7 @@ handle_hole:
 		bh->b_blocknr = -1UL;
 		clear_buffer_mapped(bh);
 handle_zblock:
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + i * blocksize, 0, blocksize);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(page);
+		zero_user(page, i * blocksize, blocksize);
 		if (likely(!err))
 			set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -400,7 +401,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
 	loff_t i_size;
 	struct inode *vi;
 	ntfs_inode *ni, *base_ni;
-	u8 *kaddr;
+	u8 *addr;
 	ntfs_attr_search_ctx *ctx;
 	MFT_RECORD *mrec;
 	unsigned long flags;
@@ -409,6 +410,15 @@ retry_readpage:
 	BUG_ON(!PageLocked(page));
+	vi = page->mapping->host;
+	i_size = i_size_read(vi);
+	/* Is the page fully outside i_size? (truncate in progress) */
+	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT)) {
+		zero_user(page, 0, PAGE_CACHE_SIZE);
+		ntfs_debug("Read outside i_size - truncated?");
+		goto done;
+	}
 	/*
 	 * This can potentially happen because we clear PageUptodate() during
 	 * ntfs_writepage() of MstProtected() attributes.
@@ -417,7 +427,6 @@ retry_readpage:
 		unlock_page(page);
 		return 0;
 	}
-	vi = page->mapping->host;
 	ni = NTFS_I(vi);
 	/*
 	 * Only $DATA attributes can be encrypted and only unnamed $DATA
@@ -455,10 +464,7 @@ retry_readpage:
 	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr, 0, PAGE_CACHE_SIZE);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user(page, 0, PAGE_CACHE_SIZE);
 		goto done;
 	}
 	if (!NInoAttr(ni))
@@ -498,15 +504,15 @@ retry_readpage:
 		/* Race with shrinking truncate. */
 		attr_len = i_size;
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data to the page. */
-	memcpy(kaddr, (u8*)ctx->attr +
+	memcpy(addr, (u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			attr_len);
 	/* Zero the remainder of the page. */
-	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(addr);
 put_unm_err_out:
 	ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -557,7 +563,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 	unsigned long flags;
 	unsigned int blocksize, vcn_ofs;
 	int err;
-	BOOL need_end_writeback;
+	bool need_end_writeback;
 	unsigned char blocksize_bits;
 
 	vi = page->mapping->host;
@@ -569,10 +575,8 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 
 	BUG_ON(!NInoNonResident(ni));
 	BUG_ON(NInoMstProtected(ni));
-
-	blocksize_bits = vi->i_blkbits;
-	blocksize = 1 << blocksize_bits;
-
+	blocksize = vol->sb->s_blocksize;
+	blocksize_bits = vol->sb->s_blocksize_bits;
 	if (!page_has_buffers(page)) {
 		BUG_ON(!PageUptodate(page));
 		create_empty_buffers(page, blocksize,
@@ -627,7 +631,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 	rl = NULL;
 	err = 0;
 	do {
-		BOOL is_retry = FALSE;
+		bool is_retry = false;
 
 		if (unlikely(block >= dblock)) {
 			/*
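The open-coded kmap_atomic()/memset()/flush_dcache_page()/kunmap_atomic() sequences in the read path collapse into zero_user(), and the new check at the top of ntfs_readpage() uses it to satisfy reads of pages that a racing truncate has pushed beyond i_size. Roughly, the helper behaves like the sketch below; the real implementation in <linux/highmem.h> is built on zero_user_segments():

#include <linux/bug.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Approximation of what zero_user(page, start, size) does; shown as a
 * sketch, not the actual library code.
 */
static inline void zero_user_sketch(struct page *page, unsigned start,
		unsigned size)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(start + size > PAGE_SIZE);	/* must stay within one page */
	memset(kaddr + start, 0, size);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}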
@@ -742,14 +746,14 @@ lock_retry_remap:
 			unsigned long *bpos, *bend;
 
 			/* Check if the buffer is zero. */
-			kaddr = kmap_atomic(page, KM_USER0);
+			kaddr = kmap_atomic(page);
 			bpos = (unsigned long *)(kaddr + bh_offset(bh));
 			bend = (unsigned long *)((u8*)bpos + blocksize);
 			do {
 				if (unlikely(*bpos))
 					break;
 			} while (likely(++bpos < bend));
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			if (bpos == bend) {
 				/*
 				 * Buffer is zero and sparse, no need to write
@@ -769,7 +773,7 @@ lock_retry_remap:
 			}
 			/* If first try and runlist unmapped, map and retry. */
 			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-				is_retry = TRUE;
+				is_retry = true;
 				/*
 				 * Attempt to map runlist, dropping lock for
 				 * the duration.
@@ -787,14 +791,9 @@ lock_retry_remap:
 			 * uptodate so it can get discarded by the VM.
 			 */
 			if (err == -ENOENT || lcn == LCN_ENOENT) {
-				u8 *kaddr;
-
 				bh->b_blocknr = -1;
 				clear_buffer_dirty(bh);
-				kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0, blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user(page, bh_offset(bh), blocksize);
 				set_buffer_uptodate(bh);
 				err = 0;
 				continue;
@@ -875,12 +874,12 @@ lock_retry_remap:
 	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
 
 	/* Submit the prepared buffers for i/o. */
-	need_end_writeback = TRUE;
+	need_end_writeback = true;
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			submit_bh(WRITE, bh);
-			need_end_writeback = FALSE;
+			need_end_writeback = false;
 		}
 		bh = next;
 	} while (bh != head);
@@ -933,7 +932,7 @@ static int ntfs_write_mst_block(struct page *page,
 	runlist_element *rl;
 	int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
 	unsigned bh_size, rec_size_bits;
-	BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+	bool sync, is_mft, page_is_dirty, rec_is_dirty;
 	unsigned char bh_size_bits;
 
 	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -949,8 +948,8 @@ static int ntfs_write_mst_block(struct page *page,
 	 */
 	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
 			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
-	bh_size_bits = vi->i_blkbits;
-	bh_size = 1 << bh_size_bits;
+	bh_size = vol->sb->s_blocksize;
+	bh_size_bits = vol->sb->s_blocksize_bits;
 	max_bhs = PAGE_CACHE_SIZE / bh_size;
 	BUG_ON(!max_bhs);
 	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
@@ -976,10 +975,10 @@ static int ntfs_write_mst_block(struct page *page,
 
 	rl = NULL;
 	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
-	page_is_dirty = rec_is_dirty = FALSE;
+	page_is_dirty = rec_is_dirty = false;
 	rec_start_bh = NULL;
 	do {
-		BOOL is_retry = FALSE;
+		bool is_retry = false;
 
 		if (likely(block < rec_block)) {
 			if (unlikely(block >= dblock)) {
@@ -1010,10 +1009,10 @@ static int ntfs_write_mst_block(struct page *page,
 			}
 			if (!buffer_dirty(bh)) {
 				/* Clean records are not written out. */
-				rec_is_dirty = FALSE;
+				rec_is_dirty = false;
 				continue;
 			}
-			rec_is_dirty = TRUE;
+			rec_is_dirty = true;
 			rec_start_bh = bh;
 		}
 		/* Need to map the buffer if it is not mapped already. */
@@ -1054,7 +1053,7 @@ lock_retry_remap:
 				 */
 				if (!is_mft && !is_retry &&
 						lcn == LCN_RL_NOT_MAPPED) {
-					is_retry = TRUE;
+					is_retry = true;
 					/*
 					 * Attempt to map runlist, dropping
 					 * lock for the duration.
@@ -1064,7 +1063,7 @@ lock_retry_remap:
 					if (likely(!err2))
 						goto lock_retry_remap;
 					if (err2 == -ENOMEM)
-						page_is_dirty = TRUE;
+						page_is_dirty = true;
 					lcn = err2;
 				} else {
 					err2 = -EIO;
@@ -1146,7 +1145,7 @@ lock_retry_remap:
 				 * means we need to redirty the page before
 				 * returning.
 				 */
-				page_is_dirty = TRUE;
+				page_is_dirty = true;
 				/*
 				 * Remove the buffers in this mft record from
 				 * the list of buffers to write.
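Besides the tree-wide BOOL/TRUE/FALSE to C99 bool/true/false conversion, the logic worth noting above is the word-at-a-time scan deciding whether a buffer in a sparse region still contains only zeroes, in which case ntfs_write_block() can skip allocating and writing it. A standalone restatement of that test; buffer_is_zero_sketch() is hypothetical, not a function from aops.c:

#include <linux/highmem.h>
#include <linux/types.h>

/*
 * Hypothetical standalone version of the zero-buffer scan: walk the
 * buffer one machine word at a time and report whether it is all zeroes.
 */
static bool buffer_is_zero_sketch(struct page *page, unsigned int ofs,
		unsigned int blocksize)
{
	u8 *kaddr = kmap_atomic(page);
	unsigned long *bpos = (unsigned long *)(kaddr + ofs);
	unsigned long *bend = (unsigned long *)((u8 *)bpos + blocksize);
	bool zero = true;

	do {
		if (unlikely(*bpos)) {
			zero = false;	/* live data found, must be written */
			break;
		}
	} while (likely(++bpos < bend));
	kunmap_atomic(kaddr);
	return zero;
}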
@@ -1196,7 +1195,7 @@ lock_retry_remap:
 		tbh = bhs[i];
 		if (!tbh)
 			continue;
-		if (unlikely(test_set_buffer_locked(tbh)))
+		if (!trylock_buffer(tbh))
 			BUG();
 		/* The buffer dirty state is now irrelevant, just clean it. */
 		clear_buffer_dirty(tbh);
@@ -1279,18 +1278,18 @@ unm_done:
 		ntfs_inode *tni, *base_tni;
 
 		tni = locked_nis[nr_locked_nis];
 		/* Get the base inode. */
-		down(&tni->extent_lock);
+		mutex_lock(&tni->extent_lock);
 		if (tni->nr_extents >= 0)
 			base_tni = tni;
 		else {
 			base_tni = tni->ext.base_ntfs_ino;
 			BUG_ON(!base_tni);
 		}
-		up(&tni->extent_lock);
+		mutex_unlock(&tni->extent_lock);
 		ntfs_debug("Unlocking %s inode 0x%lx.", tni == base_tni ?
 				"base" : "extent", tni->mft_no);
-		up(&tni->mrec_lock);
+		mutex_unlock(&tni->mrec_lock);
 		atomic_dec(&tni->count);
 		iput(VFS_I(base_tni));
 	}
@@ -1357,7 +1356,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 	loff_t i_size;
 	struct inode *vi = page->mapping->host;
 	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
-	char *kaddr;
+	char *addr;
 	ntfs_attr_search_ctx *ctx = NULL;
 	MFT_RECORD *m = NULL;
 	u32 attr_len;
@@ -1373,7 +1372,7 @@ retry_writepage:
 		 * The page may have dirty, unmapped buffers. Make them
 		 * freeable here, so the page does not leak.
 		 */
-		block_invalidatepage(page, 0);
+		block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		unlock_page(page);
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
@@ -1419,10 +1418,7 @@ retry_writepage:
 	if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
 		/* The page straddles i_size. */
 		unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(page);
+		zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
 	}
 	/* Handle mst protected attributes. */
 	if (NInoMstProtected(ni))
@@ -1499,14 +1495,14 @@ retry_writepage:
 		/* Shrinking cannot fail. */
 		BUG_ON(err);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
-			kaddr, attr_len);
+			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
-	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	/* We are done with the page. */
@@ -1531,7 +1527,6 @@ err_out:
 				"error %i.", err);
 		SetPageError(page);
 		NVolSetErrors(ni->vol);
-		make_bad_inode(vi);
 	}
 	unlock_page(page);
 	if (ctx)
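Two locking migrations land here, and both are polarity traps: test_set_buffer_locked() returned non-zero when the buffer was ALREADY locked, while trylock_buffer() returns non-zero when the lock WAS acquired, hence the flipped !trylock_buffer() test. The extent_lock and mrec_lock semaphores likewise become mutexes, turning down()/up() into mutex_lock()/mutex_unlock(). Hypothetical helpers spelling out the converted semantics, not code from aops.c:

#include <linux/bug.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>

static void lock_buffer_or_bug(struct buffer_head *bh)
{
	/* trylock_buffer() == 0 means somebody else holds the buffer lock. */
	if (!trylock_buffer(bh))
		BUG();
}

static void with_lock(struct mutex *lock)
{
	mutex_lock(lock);	/* was: down(&sem) */
	/* ... critical section ... */
	mutex_unlock(lock);	/* was: up(&sem) */
}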
@@ -1546,29 +1541,33 @@ err_out:
 /**
  * ntfs_aops - general address space operations for inodes and attributes
  */
-struct address_space_operations ntfs_aops = {
+const struct address_space_operations ntfs_aops = {
 	.readpage	= ntfs_readpage,	/* Fill page with data. */
-	.sync_page	= block_sync_page,	/* Currently, just unplugs the
-						   disk request queue. */
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
 #endif /* NTFS_RW */
+	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
+						   one physical page to an
+						   other. */
+	.error_remove_page = generic_error_remove_page,
 };
 
 /**
  * ntfs_mst_aops - general address space operations for mst protecteed inodes
  *		   and attributes
  */
-struct address_space_operations ntfs_mst_aops = {
+const struct address_space_operations ntfs_mst_aops = {
 	.readpage	= ntfs_readpage,	/* Fill page with data. */
-	.sync_page	= block_sync_page,	/* Currently, just unplugs the
-						   disk request queue. */
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
 	.set_page_dirty	= __set_page_dirty_nobuffers,	/* Set the page dirty
 						   without touching the buffers
 						   belonging to the page. */
 #endif /* NTFS_RW */
+	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
+						   one physical page to an
+						   other. */
+	.error_remove_page = generic_error_remove_page,
 };
 
 #ifdef NTFS_RW
@@ -1596,7 +1595,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 	BUG_ON(!PageUptodate(page));
 	end = ofs + ni->itype.index.block_size;
-	bh_size = 1 << VFS_I(ni)->i_blkbits;
+	bh_size = VFS_I(ni)->i_sb->s_blocksize;
 	spin_lock(&mapping->private_lock);
 	if (unlikely(!page_has_buffers(page))) {
 		spin_unlock(&mapping->private_lock);
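Throughout the patch, the repeated "1 << i_blkbits" computations give way to direct reads of the superblock fields, which works because the VFS maintains s_blocksize == 1 << s_blocksize_bits via sb_set_blocksize(). A hypothetical helper making that assumed invariant explicit:

#include <linux/bug.h>
#include <linux/fs.h>

/*
 * Hypothetical helper: the patch relies on the standard VFS invariant
 * that s_blocksize is always the power of two named by s_blocksize_bits.
 */
static unsigned long sb_block_size(const struct super_block *sb)
{
	BUG_ON(sb->s_blocksize != 1UL << sb->s_blocksize_bits);
	return sb->s_blocksize;
}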
