Diffstat (limited to 'fs/ext4/extents.c')
 fs/ext4/extents.c | 1168 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 883 insertions(+), 285 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 57cf568a98a..61fa9e1614a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -42,7 +42,6 @@
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
-#include "ext4_extents.h"
#include <trace/events/ext4.h>
@@ -96,13 +95,17 @@ static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
* - ENOMEM
* - EIO
*/
-static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *path)
+#define ext4_ext_dirty(handle, inode, path) \
+ __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
+static int __ext4_ext_dirty(const char *where, unsigned int line,
+ handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path)
{
int err;
if (path->p_bh) {
/* path points to block */
- err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
+ err = __ext4_handle_dirty_metadata(where, line, handle,
+ inode, path->p_bh);
} else {
/* path points to leaf/index in inode body */
err = ext4_mark_inode_dirty(handle, inode);
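
The macro above forwards the call site into __ext4_ext_dirty() so that journaling errors can be attributed to the function and line that dirtied the path. A minimal userspace sketch of the same __func__/__LINE__ capture pattern (names here are illustrative, not kernel APIs):

    #include <stdio.h>

    #define mark_dirty(obj) __mark_dirty(__func__, __LINE__, (obj))

    static int __mark_dirty(const char *where, unsigned int line, int *obj)
    {
            /* A real implementation would journal the buffer; this one
             * only records which call site asked for the write-out. */
            printf("dirtied from %s:%u\n", where, line);
            *obj = 1;
            return 0;
    }

    int main(void)
    {
            int dirty = 0;
            mark_dirty(&dirty);     /* prints "dirtied from main:<line>" */
            return 0;
    }
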
@@ -114,11 +117,9 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
- int depth;
-
if (path) {
+ int depth = path->p_depth;
struct ext4_extent *ex;
- depth = path->p_depth;
/*
* Try to predict block placement assuming that we are
@@ -180,12 +181,10 @@ static inline int ext4_ext_space_block(struct inode *inode, int check)
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent);
- if (!check) {
#ifdef AGGRESSIVE_TEST
- if (size > 6)
- size = 6;
+ if (!check && size > 6)
+ size = 6;
#endif
- }
return size;
}
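
To make the capacity formula concrete: with 4KiB blocks and the usual 12-byte on-disk sizes for struct ext4_extent_header and struct ext4_extent, a block holds (4096 - 12) / 12 = 340 extents. A self-contained sketch of the same arithmetic (the structs below are 12-byte stand-ins, not the real on-disk layouts):

    #include <stdio.h>

    struct fake_hdr { unsigned short magic, entries, max, depth; unsigned int gen; };
    struct fake_ext { unsigned int lblk; unsigned short len, hi; unsigned int lo; };

    int main(void)
    {
            unsigned long blocksize = 4096;

            printf("%lu extents per block\n",
                   (blocksize - sizeof(struct fake_hdr)) / sizeof(struct fake_ext));
            /* prints: 340 extents per block */
            return 0;
    }
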
@@ -195,12 +194,10 @@ static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx);
- if (!check) {
#ifdef AGGRESSIVE_TEST
- if (size > 5)
- size = 5;
+ if (!check && size > 5)
+ size = 5;
#endif
- }
return size;
}
@@ -211,12 +208,10 @@ static inline int ext4_ext_space_root(struct inode *inode, int check)
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent);
- if (!check) {
#ifdef AGGRESSIVE_TEST
- if (size > 3)
- size = 3;
+ if (!check && size > 3)
+ size = 3;
#endif
- }
return size;
}
@@ -227,12 +222,10 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
size = sizeof(EXT4_I(inode)->i_data);
size -= sizeof(struct ext4_extent_header);
size /= sizeof(struct ext4_extent_idx);
- if (!check) {
#ifdef AGGRESSIVE_TEST
- if (size > 4)
- size = 4;
+ if (!check && size > 4)
+ size = 4;
#endif
- }
return size;
}
@@ -244,7 +237,7 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
- int idxs, num = 0;
+ int idxs;
idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
/ sizeof(struct ext4_extent_idx));
@@ -259,6 +252,8 @@ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
*/
if (ei->i_da_metadata_calc_len &&
ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+ int num = 0;
+
if ((ei->i_da_metadata_calc_len % idxs) == 0)
num++;
if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
@@ -321,8 +316,6 @@ static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh,
int depth)
{
- struct ext4_extent *ext;
- struct ext4_extent_idx *ext_idx;
unsigned short entries;
if (eh->eh_entries == 0)
return 1;
@@ -331,7 +324,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) {
/* leaf entries */
- ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
@@ -339,7 +332,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
entries--;
}
} else {
- ext_idx = EXT_FIRST_INDEX(eh);
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
while (entries) {
if (!ext4_valid_extent_idx(inode, ext_idx))
return 0;
@@ -751,31 +744,30 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
return -EIO;
}
- len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
- if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
- len = (len - 1) * sizeof(struct ext4_extent_idx);
- len = len < 0 ? 0 : len;
- ext_debug("insert new index %d after: %llu. "
- "move %d from 0x%p to 0x%p\n",
- logical, ptr, len,
- (curp->p_idx + 1), (curp->p_idx + 2));
- memmove(curp->p_idx + 2, curp->p_idx + 1, len);
- }
+ ext_debug("insert new index %d after: %llu\n", logical, ptr);
ix = curp->p_idx + 1;
} else {
/* insert before */
- len = len * sizeof(struct ext4_extent_idx);
- len = len < 0 ? 0 : len;
- ext_debug("insert new index %d before: %llu. "
- "move %d from 0x%p to 0x%p\n",
- logical, ptr, len,
- curp->p_idx, (curp->p_idx + 1));
- memmove(curp->p_idx + 1, curp->p_idx, len);
+ ext_debug("insert new index %d before: %llu\n", logical, ptr);
ix = curp->p_idx;
}
+ len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ BUG_ON(len < 0);
+ if (len > 0) {
+ ext_debug("insert new index %d: "
+ "move %d indices from 0x%p to 0x%p\n",
+ logical, len, ix, ix + 1);
+ memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ }
+
+ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+ return -EIO;
+ }
+
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
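
The rewritten path computes one element count from the insertion point to the last live index and performs a single memmove() to open the slot, replacing the two asymmetric length calculations it removes. The same shift-and-insert idiom on a plain array (a standalone sketch, not the kernel code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int idx[8] = {10, 20, 30, 40};
            int nr = 4;                        /* live entries */
            int *ix = &idx[1];                 /* insertion point */
            int len = &idx[nr - 1] - ix + 1;   /* entries to shift right: 3 */

            if (len > 0)
                    memmove(ix + 1, ix, len * sizeof(*ix));
            *ix = 15;                          /* new entry */
            nr++;

            for (int i = 0; i < nr; i++)
                    printf("%d ", idx[i]);     /* 10 15 20 30 40 */
            printf("\n");
            return 0;
    }
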
@@ -1042,16 +1034,14 @@ cleanup:
*/
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
unsigned int flags,
- struct ext4_ext_path *path,
struct ext4_extent *newext)
{
- struct ext4_ext_path *curp = path;
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock;
int err = 0;
- newblock = ext4_ext_new_meta_block(handle, inode, path,
+ newblock = ext4_ext_new_meta_block(handle, inode, NULL,
newext, &err, flags);
if (newblock == 0)
return err;
@@ -1071,7 +1061,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
}
/* move top-level index/leaf into new block */
- memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
+ memmove(bh->b_data, EXT4_I(inode)->i_data,
+ sizeof(EXT4_I(inode)->i_data));
/* set size of new block */
neh = ext_block_hdr(bh);
@@ -1089,32 +1080,23 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
if (err)
goto out;
- /* create index in new top-level index: num,max,pointer */
- err = ext4_ext_get_access(handle, inode, curp);
- if (err)
- goto out;
-
- curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
- curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
- curp->p_hdr->eh_entries = cpu_to_le16(1);
- curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
-
- if (path[0].p_hdr->eh_depth)
- curp->p_idx->ei_block =
- EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
- else
- curp->p_idx->ei_block =
- EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
- ext4_idx_store_pblock(curp->p_idx, newblock);
-
+ /* Update top-level index: num,max,pointer */
neh = ext_inode_hdr(inode);
+ neh->eh_entries = cpu_to_le16(1);
+ ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
+ if (neh->eh_depth == 0) {
+ /* Root extent block becomes index block */
+ neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
+ EXT_FIRST_INDEX(neh)->ei_block =
+ EXT_FIRST_EXTENT(neh)->ee_block;
+ }
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
- neh->eh_depth = cpu_to_le16(path->p_depth + 1);
- err = ext4_ext_dirty(handle, inode, curp);
+ neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+ ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -1162,8 +1144,7 @@ repeat:
err = PTR_ERR(path);
} else {
/* tree is full, time to grow in depth */
- err = ext4_ext_grow_indepth(handle, inode, flags,
- path, newext);
+ err = ext4_ext_grow_indepth(handle, inode, flags, newext);
if (err)
goto out;
@@ -1235,9 +1216,9 @@ static int ext4_ext_search_left(struct inode *inode,
if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
- ix != NULL ? ix->ei_block : 0,
+ ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
- EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
+ le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
depth);
return -EIO;
}
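
The change wraps the raw on-disk fields in le32_to_cpu() before printing, since extent fields are stored little-endian and would appear byte-swapped in the error message on big-endian machines. A userspace illustration, with le32toh() standing in for le32_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ei_block = htole32(4660);  /* field as stored on disk */

            /* Wrong on big-endian hosts: prints the byte-swapped value. */
            printf("raw: %u\n", (unsigned) ei_block);
            /* Correct everywhere: convert before use. */
            printf("converted: %u\n", (unsigned) le32toh(ei_block));
            return 0;
    }
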
@@ -1260,13 +1241,14 @@ static int ext4_ext_search_left(struct inode *inode,
/*
* search the closest allocated block to the right for *logical
* and returns it at @logical + its physical address at @phys
- * if *logical is the smallest allocated block, the function
+ * if *logical is the largest allocated block, the function
* returns 0 at @phys
* return value contains 0 (success) or error code
*/
static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path,
- ext4_lblk_t *logical, ext4_fsblk_t *phys)
+ ext4_lblk_t *logical, ext4_fsblk_t *phys,
+ struct ext4_extent **ret_ex)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
@@ -1308,9 +1290,7 @@ static int ext4_ext_search_right(struct inode *inode,
return -EIO;
}
}
- *logical = le32_to_cpu(ex->ee_block);
- *phys = ext4_ext_pblock(ex);
- return 0;
+ goto found_extent;
}
if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
@@ -1323,9 +1303,7 @@ static int ext4_ext_search_right(struct inode *inode,
if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
/* next allocated block in this leaf */
ex++;
- *logical = le32_to_cpu(ex->ee_block);
- *phys = ext4_ext_pblock(ex);
- return 0;
+ goto found_extent;
}
/* go up and search for index to the right */
@@ -1368,9 +1346,12 @@ got_index:
return -EIO;
}
ex = EXT_FIRST_EXTENT(eh);
+found_extent:
*logical = le32_to_cpu(ex->ee_block);
*phys = ext4_ext_pblock(ex);
- put_bh(bh);
+ *ret_ex = ex;
+ if (bh)
+ put_bh(bh);
return 0;
}
@@ -1395,7 +1376,8 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
while (depth >= 0) {
if (depth == path->p_depth) {
/* leaf */
- if (path[depth].p_ext !=
+ if (path[depth].p_ext &&
+ path[depth].p_ext !=
EXT_LAST_EXTENT(path[depth].p_hdr))
return le32_to_cpu(path[depth].p_ext[1].ee_block);
} else {
@@ -1623,7 +1605,8 @@ static int ext4_ext_try_to_merge(struct inode *inode,
* such that there will be no overlap, and then returns 1.
* If there is no overlap found, it returns 0.
*/
-static unsigned int ext4_ext_check_overlap(struct inode *inode,
+static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ struct inode *inode,
struct ext4_extent *newext,
struct ext4_ext_path *path)
{
@@ -1637,6 +1620,7 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
if (!path[depth].p_ext)
goto out;
b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+ b2 &= ~(sbi->s_cluster_ratio - 1);
/*
* get the next allocated block if the extent in the path
@@ -1646,6 +1630,7 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
+ b2 &= ~(sbi->s_cluster_ratio - 1);
}
/* check for wrap through zero on extent logical start block*/
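
Both new lines round a logical block number down to the start of its allocation cluster; because s_cluster_ratio is a power of two, clearing the low bits is equivalent to subtracting the remainder. A minimal illustration of the mask:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cluster_ratio = 16;    /* blocks per cluster, power of 2 */
            unsigned int b2 = 1000003;

            b2 &= ~(cluster_ratio - 1);         /* round down to cluster start */
            printf("%u\n", b2);                 /* 1000000 */
            return 0;
    }
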
@@ -1697,7 +1682,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
/* try to insert block into found extent and return */
if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
&& ext4_can_extents_be_merged(inode, ex, newext)) {
- ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
+ ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
@@ -1735,7 +1720,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
- ext_debug("next leaf block - %d\n", next);
+ ext_debug("next leaf block - %u\n", next);
BUG_ON(npath != NULL);
npath = ext4_ext_find_extent(inode, next, NULL);
if (IS_ERR(npath))
@@ -1773,46 +1758,51 @@ has_space:
if (!nearex) {
/* there is no extent in this leaf, create first one */
- ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
+ ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext));
- path[depth].p_ext = EXT_FIRST_EXTENT(eh);
- } else if (le32_to_cpu(newext->ee_block)
+ nearex = EXT_FIRST_EXTENT(eh);
+ } else {
+ if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
-/* BUG_ON(newext->ee_block == nearex->ee_block); */
- if (nearex != EXT_LAST_EXTENT(eh)) {
- len = EXT_MAX_EXTENT(eh) - nearex;
- len = (len - 1) * sizeof(struct ext4_extent);
- len = len < 0 ? 0 : len;
- ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
- "move %d from 0x%p to 0x%p\n",
+ /* Insert after */
+ ext_debug("insert %u:%llu:[%d]%d before: "
+ "nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
- nearex, len, nearex + 1, nearex + 2);
- memmove(nearex + 2, nearex + 1, len);
+ nearex);
+ nearex++;
+ } else {
+ /* Insert before */
+ BUG_ON(newext->ee_block == nearex->ee_block);
+ ext_debug("insert %u:%llu:[%d]%d after: "
+ "nearest %p\n",
+ le32_to_cpu(newext->ee_block),
+ ext4_ext_pblock(newext),
+ ext4_ext_is_uninitialized(newext),
+ ext4_ext_get_actual_len(newext),
+ nearex);
+ }
+ len = EXT_LAST_EXTENT(eh) - nearex + 1;
+ if (len > 0) {
+ ext_debug("insert %u:%llu:[%d]%d: "
+ "move %d extents from 0x%p to 0x%p\n",
+ le32_to_cpu(newext->ee_block),
+ ext4_ext_pblock(newext),
+ ext4_ext_is_uninitialized(newext),
+ ext4_ext_get_actual_len(newext),
+ len, nearex, nearex + 1);
+ memmove(nearex + 1, nearex,
+ len * sizeof(struct ext4_extent));
}
- path[depth].p_ext = nearex + 1;
- } else {
- BUG_ON(newext->ee_block == nearex->ee_block);
- len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
- len = len < 0 ? 0 : len;
- ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
- "move %d from 0x%p to 0x%p\n",
- le32_to_cpu(newext->ee_block),
- ext4_ext_pblock(newext),
- ext4_ext_is_uninitialized(newext),
- ext4_ext_get_actual_len(newext),
- nearex, len, nearex, nearex + 1);
- memmove(nearex + 1, nearex, len);
- path[depth].p_ext = nearex;
}
le16_add_cpu(&eh->eh_entries, 1);
- nearex = path[depth].p_ext;
+ path[depth].p_ext = nearex;
nearex->ee_block = newext->ee_block;
ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
nearex->ee_len = newext->ee_len;
@@ -1962,6 +1952,7 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_cache *cex;
BUG_ON(len == 0);
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ trace_ext4_ext_put_in_cache(inode, block, len, start);
cex = &EXT4_I(inode)->i_cached_extent;
cex->ec_block = block;
cex->ec_len = len;
@@ -2063,6 +2054,7 @@ errout:
sbi->extent_cache_misses++;
else
sbi->extent_cache_hits++;
+ trace_ext4_ext_in_cache(inode, block, ret);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return ret;
}
@@ -2130,6 +2122,8 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
+ trace_ext4_ext_rm_idx(inode, leaf);
+
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return err;
@@ -2158,7 +2152,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
* need to account for leaf block credit
*
* bitmaps and block group descriptor blocks
- * and other metadat blocks still need to be
+ * and other metadata blocks still need to be
* accounted.
*/
/* 1 bitmap, 1 block group descriptor */
@@ -2195,14 +2189,40 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
- struct ext4_extent *ex,
- ext4_lblk_t from, ext4_lblk_t to)
+ struct ext4_extent *ex,
+ ext4_fsblk_t *partial_cluster,
+ ext4_lblk_t from, ext4_lblk_t to)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
+ ext4_fsblk_t pblk;
int flags = EXT4_FREE_BLOCKS_FORGET;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA;
+ /*
+ * For bigalloc file systems, we never free a partial cluster
+ * at the beginning of the extent. Instead, we make a note
+ * that we tried freeing the cluster, and check to see if we
+ * need to free it on a subsequent call to ext4_remove_blocks,
+ * or at the end of the ext4_truncate() operation.
+ */
+ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
+
+ trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
+ /*
+ * If we have a partial cluster, and it's different from the
+ * cluster of the last block, we need to explicitly free the
+ * partial cluster here.
+ */
+ pblk = ext4_ext_pblock(ex) + ee_len - 1;
+ if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(sbi, *partial_cluster),
+ sbi->s_cluster_ratio, flags);
+ *partial_cluster = 0;
+ }
+
#ifdef EXTENTS_STATS
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
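
The partial-cluster bookkeeping above leans on block/cluster conversions: EXT4_B2C() maps a physical block to its cluster and EXT4_C2B() maps a cluster back to its first block. A hedged userspace sketch of that pair as power-of-two shifts (simplified stand-ins, not the kernel macros):

    #include <stdint.h>
    #include <stdio.h>

    /* cluster_bits = log2(blocks per cluster), e.g. 4 for 16-block clusters */
    static uint64_t b2c(uint64_t block, unsigned int cluster_bits)
    {
            return block >> cluster_bits;   /* like EXT4_B2C() */
    }

    static uint64_t c2b(uint64_t cluster, unsigned int cluster_bits)
    {
            return cluster << cluster_bits; /* like EXT4_C2B() */
    }

    int main(void)
    {
            uint64_t pblk = 12345;

            printf("block %llu is in cluster %llu, which starts at block %llu\n",
                   (unsigned long long) pblk,
                   (unsigned long long) b2c(pblk, 4),
                   (unsigned long long) c2b(b2c(pblk, 4), 4));
            /* block 12345 is in cluster 771, which starts at block 12336 */
            return 0;
    }
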
@@ -2222,12 +2242,24 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
ext4_lblk_t num;
- ext4_fsblk_t start;
num = le32_to_cpu(ex->ee_block) + ee_len - from;
- start = ext4_ext_pblock(ex) + ee_len - num;
- ext_debug("free last %u blocks starting %llu\n", num, start);
- ext4_free_blocks(handle, inode, NULL, start, num, flags);
+ pblk = ext4_ext_pblock(ex) + ee_len - num;
+ ext_debug("free last %u blocks starting %llu\n", num, pblk);
+ ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
+ /*
+ * If the block range to be freed didn't start at the
+ * beginning of a cluster, and we removed the entire
+ * extent, save the partial cluster here, since we
+ * might need to delete if we determine that the
+ * truncate operation has removed all of the blocks in
+ * the cluster.
+ */
+ if (pblk & (sbi->s_cluster_ratio - 1) &&
+ (ee_len == num))
+ *partial_cluster = EXT4_B2C(sbi, pblk);
+ else
+ *partial_cluster = 0;
} else if (from == le32_to_cpu(ex->ee_block)
&& to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* head removal */
@@ -2238,7 +2270,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
start = ext4_ext_pblock(ex);
ext_debug("free first %u blocks starting %llu\n", num, start);
- ext4_free_blocks(handle, inode, 0, start, num, flags);
+ ext4_free_blocks(handle, inode, NULL, start, num, flags);
} else {
printk(KERN_INFO "strange request: removal(2) "
@@ -2262,19 +2294,19 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
*/
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *path, ext4_lblk_t start,
- ext4_lblk_t end)
+ struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
+ ext4_lblk_t start, ext4_lblk_t end)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int err = 0, correct_index = 0;
int depth = ext_depth(inode), credits;
struct ext4_extent_header *eh;
- ext4_lblk_t a, b, block;
+ ext4_lblk_t a, b;
unsigned num;
ext4_lblk_t ex_ee_block;
unsigned short ex_ee_len;
unsigned uninitialized = 0;
struct ext4_extent *ex;
- struct ext4_map_blocks map;
/* the header must be checked already in ext4_ext_remove_space() */
ext_debug("truncate since %u in leaf\n", start);
@@ -2291,6 +2323,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
+ trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
@@ -2315,86 +2349,18 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
continue;
- } else if (a != ex_ee_block &&
- b != ex_ee_block + ex_ee_len - 1) {
- /*
- * If this is a truncate, then this condition should
- * never happen because at least one of the end points
- * needs to be on the edge of the extent.
- */
- if (end == EXT_MAX_BLOCKS - 1) {
- ext_debug(" bad truncate %u:%u\n",
- start, end);
- block = 0;
- num = 0;
- err = -EIO;
- goto out;
- }
- /*
- * else this is a hole punch, so the extent needs to
- * be split since neither edge of the hole is on the
- * extent edge
- */
- else{
- map.m_pblk = ext4_ext_pblock(ex);
- map.m_lblk = ex_ee_block;
- map.m_len = b - ex_ee_block;
-
- err = ext4_split_extent(handle,
- inode, path, &map, 0,
- EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
- EXT4_GET_BLOCKS_PRE_IO);
-
- if (err < 0)
- goto out;
-
- ex_ee_len = ext4_ext_get_actual_len(ex);
-
- b = ex_ee_block+ex_ee_len - 1 < end ?
- ex_ee_block+ex_ee_len - 1 : end;
-
- /* Then remove tail of this extent */
- block = ex_ee_block;
- num = a - block;
- }
+ } else if (b != ex_ee_block + ex_ee_len - 1) {
+ EXT4_ERROR_INODE(inode," bad truncate %u:%u\n",
+ start, end);
+ err = -EIO;
+ goto out;
} else if (a != ex_ee_block) {
/* remove tail of the extent */
- block = ex_ee_block;
- num = a - block;
- } else if (b != ex_ee_block + ex_ee_len - 1) {
- /* remove head of the extent */
- block = b;
- num = ex_ee_block + ex_ee_len - b;
-
- /*
- * If this is a truncate, this condition
- * should never happen
- */
- if (end == EXT_MAX_BLOCKS - 1) {
- ext_debug(" bad truncate %u:%u\n",
- start, end);
- err = -EIO;
- goto out;
- }
+ num = a - ex_ee_block;
} else {
/* remove whole extent: excellent! */
- block = ex_ee_block;
num = 0;
- if (a != ex_ee_block) {
- ext_debug(" bad truncate %u:%u\n",
- start, end);
- err = -EIO;
- goto out;
- }
-
- if (b != ex_ee_block + ex_ee_len - 1) {
- ext_debug(" bad truncate %u:%u\n",
- start, end);
- err = -EIO;
- goto out;
- }
}
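
For context, a and b are the removal range [start, end] clamped to the current extent (their derivation lives earlier in this function, outside this hunk), which is why the simplified branches above only need to distinguish untouched extents, tail removal, and whole-extent removal. A sketch of such clamping, under that assumption:

    #include <stdio.h>

    /* Clamp the removal range [start, end] to one extent's span.
     * The extent covers ex_ee_block .. ex_ee_block + ex_ee_len - 1. */
    static void clamp_to_extent(unsigned int start, unsigned int end,
                                unsigned int ex_ee_block, unsigned int ex_ee_len,
                                unsigned int *a, unsigned int *b)
    {
            unsigned int ex_last = ex_ee_block + ex_ee_len - 1;

            *a = start > ex_ee_block ? start : ex_ee_block;
            *b = end < ex_last ? end : ex_last;
    }

    int main(void)
    {
            unsigned int a, b;

            /* extent [100, 115], truncating from block 110 to the end */
            clamp_to_extent(110, 4294967294u, 100, 16, &a, &b);
            printf("a=%u b=%u\n", a, b);   /* a=110 b=115: tail removal */
            return 0;
    }
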
-
/*
* 3 for leaf, sb, and inode plus 2 (bmap and group
* descriptor) for each block group; assume two block
@@ -2416,23 +2382,15 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (err)
goto out;
- err = ext4_remove_blocks(handle, inode, ex, a, b);
+ err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
+ a, b);
if (err)
goto out;
- if (num == 0) {
+ if (num == 0)
/* this extent is removed; mark slot entirely unused */
ext4_ext_store_pblock(ex, 0);
- } else if (block != ex_ee_block) {
- /*
- * If this was a head removal, then we need to update
- * the physical block since it is now at a different
- * location
- */
- ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
- }
- ex->ee_block = cpu_to_le32(block);
ex->ee_len = cpu_to_le16(num);
/*
* Do not mark uninitialized if all the blocks in the
@@ -2440,11 +2398,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
*/
if (uninitialized && num)
ext4_ext_mark_uninitialized(ex);
-
- err = ext4_ext_dirty(handle, inode, path + depth);
- if (err)
- goto out;
-
/*
* If the extent was completely released,
* we need to remove it from the leaf
@@ -2464,9 +2417,14 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent));
}
le16_add_cpu(&eh->eh_entries, -1);
- }
+ } else
+ *partial_cluster = 0;
- ext_debug("new extent: %u:%u:%llu\n", block, num,
+ err = ext4_ext_dirty(handle, inode, path + depth);
+ if (err)
+ goto out;
+
+ ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
@@ -2476,6 +2434,25 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (correct_index && eh->eh_entries)
err = ext4_ext_correct_indexes(handle, inode, path);
+ /*
+ * If there is still a entry in the leaf node, check to see if
+ * it references the partial cluster. This is the only place
+ * where it could; if it doesn't, we can free the cluster.
+ */
+ if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
+ (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
+ *partial_cluster)) {
+ int flags = EXT4_FREE_BLOCKS_FORGET;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
+
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(sbi, *partial_cluster),
+ sbi->s_cluster_ratio, flags);
+ *partial_cluster = 0;
+ }
+
/* if this leaf is free, then we should
* remove it from index block above */
if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
@@ -2511,6 +2488,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
struct ext4_ext_path *path;
+ ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
int i, err;
@@ -2524,6 +2502,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
again:
ext4_ext_invalidate_cache(inode);
+ trace_ext4_ext_remove_space(inode, start, depth);
+
/*
* We start scanning from right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
@@ -2546,7 +2526,8 @@ again:
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
- start, EXT_MAX_BLOCKS - 1);
+ &partial_cluster, start,
+ EXT_MAX_BLOCKS - 1);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
@@ -2618,6 +2599,24 @@ again:
}
}
+ trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
+ path->p_hdr->eh_entries);
+
+ /* If we still have something in the partial cluster and we have removed
+ * even the first extent, then we should free the blocks in the partial
+ * cluster as well. */
+ if (partial_cluster && path->p_hdr->eh_entries == 0) {
+ int flags = EXT4_FREE_BLOCKS_FORGET;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
+
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(EXT4_SB(sb), partial_cluster),
+ EXT4_SB(sb)->s_cluster_ratio, flags);
+ partial_cluster = 0;
+ }
+
/* TODO: flexible tree reduction should be here */
if (path->p_hdr->eh_entries == 0) {
/*
@@ -2909,17 +2908,29 @@ out:
* a> There is no split required: Entire extent should be initialized
* b> Splits in two extents: Write is happening at either end of the extent
* c> Splits in three extents: Someone is writing in the middle of the extent
+ *
+ * Pre-conditions:
+ * - The extent pointed to by 'path' is uninitialized.
+ * - The extent pointed to by 'path' contains a superset
+ * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
+ *
+ * Post-conditions on success:
+ * - the returned value is the number of blocks beyond map->m_lblk
+ * that are allocated and initialized.
+ * It is guaranteed to be >= map->m_len.
*/
static int ext4_ext_convert_to_initialized(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path *path)
{
+ struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
struct ext4_extent zero_ex;
struct ext4_extent *ex;
ext4_lblk_t ee_block, eof_block;
- unsigned int allocated, ee_len, depth;
+ unsigned int ee_len, depth;
+ int allocated;
int err = 0;
int split_flag = 0;
@@ -2933,11 +2944,93 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
eof_block = map->m_lblk + map->m_len;
depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (map->m_lblk - ee_block);
+ trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
+
+ /* Pre-conditions */
+ BUG_ON(!ext4_ext_is_uninitialized(ex));
+ BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
+ BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
+
+ /*
+ * Attempt to transfer newly initialized blocks from the currently
+ * uninitialized extent to its left neighbor. This is much cheaper
+ * than an insertion followed by a merge as those involve costly
+ * memmove() calls. This is the common case in steady state for
+ * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
+ * writes.
+ *
+ * Limitations of the current logic:
+ * - L1: we only deal with writes at the start of the extent.
+ * The approach could be extended to writes at the end
+ * of the extent but this scenario was deemed less common.
+ * - L2: we do not deal with writes covering the whole extent.
+ * This would require removing the extent if the transfer
+ * is possible.
+ * - L3: we only attempt to merge with an extent stored in the
+ * same extent tree node.
+ */
+ if ((map->m_lblk == ee_block) && /*L1*/
+ (map->m_len < ee_len) && /*L2*/
+ (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/
+ struct ext4_extent *prev_ex;
+ ext4_lblk_t prev_lblk;
+ ext4_fsblk_t prev_pblk, ee_pblk;
+ unsigned int prev_len, write_len;
+
+ prev_ex = ex - 1;
+ prev_lblk = le32_to_cpu(prev_ex->ee_block);
+ prev_len = ext4_ext_get_actual_len(prev_ex);
+ prev_pblk = ext4_ext_pblock(prev_ex);
+ ee_pblk = ext4_ext_pblock(ex);
+ write_len = map->m_len;
+
+ /*
+ * A transfer of blocks from 'ex' to 'prev_ex' is allowed
+ * upon those conditions:
+ * - C1: prev_ex is initialized,
+ * - C2: prev_ex is logically abutting ex,
+ * - C3: prev_ex is physically abutting ex,
+ * - C4: prev_ex can receive the additional blocks without
+ * overflowing the (initialized) length limit.
+ */
+ if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/
+ ((prev_lblk + prev_len) == ee_block) && /*C2*/
+ ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
+ (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+ goto out;
+
+ trace_ext4_ext_convert_to_initialized_fastpath(inode,
+ map, ex, prev_ex);
+
+ /* Shift the start of ex by 'write_len' blocks */
+ ex->ee_block = cpu_to_le32(ee_block + write_len);
+ ext4_ext_store_pblock(ex, ee_pblk + write_len);
+ ex->ee_len = cpu_to_le16(ee_len - write_len);
+ ext4_ext_mark_uninitialized(ex); /* Restore the flag */
+
+ /* Extend prev_ex by 'write_len' blocks */
+ prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
+
+ /* Mark the block containing both extents as dirty */
+ ext4_ext_dirty(handle, inode, path + depth);
+
+ /* Update path to point to the right extent */
+ path[depth].p_ext = prev_ex;
+
+ /* Result: number of initialized blocks past m_lblk */
+ allocated = write_len;
+ goto out;
+ }
+ }
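
A userspace illustration of the transfer above: the written prefix moves from the uninitialized extent to its initialized left neighbor by adjusting three fields on each side, avoiding any memmove() of neighboring entries (fields simplified to host-endian integers, not the on-disk layout):

    #include <stdio.h>

    struct sketch_extent {
            unsigned int lblk;          /* first logical block */
            unsigned int len;           /* number of blocks */
            unsigned long long pblk;    /* first physical block */
    };

    int main(void)
    {
            struct sketch_extent prev = { 100, 8, 5000 };  /* initialized */
            struct sketch_extent ex   = { 108, 16, 5008 }; /* uninit, abuts prev */
            unsigned int write_len = 4;                    /* blocks just written */

            ex.lblk  += write_len;      /* shift the start of ex ... */
            ex.pblk  += write_len;
            ex.len   -= write_len;
            prev.len += write_len;      /* ... and grow prev by the same amount */

            printf("prev %u+%u@%llu  ex %u+%u@%llu\n",
                   prev.lblk, prev.len, prev.pblk, ex.lblk, ex.len, ex.pblk);
            /* prev 100+12@5000  ex 112+12@5012 */
            return 0;
    }
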
+
WARN_ON(map->m_lblk < ee_block);
/*
* It is safe to convert extent to initialized via explicit
@@ -3165,6 +3258,192 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
return ext4_mark_inode_dirty(handle, inode);
}
+/**
+ * ext4_find_delalloc_range: find delayed allocated block in the given range.
+ *
+ * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
+ * whether there are any buffers marked for delayed allocation. It returns '1'
+ * on the first delalloc'ed buffer head found. If no buffer head in the given
+ * range is marked for delalloc, it returns 0.
+ * lblk_start should always be <= lblk_end.
+ * search_hint_reverse indicates that searching in reverse, from lblk_end to
+ * lblk_start, might be more efficient (i.e., we will likely hit the delalloc'ed
+ * block sooner). This is useful when blocks are truncated sequentially from
+ * lblk_start towards lblk_end.
+ */
+static int ext4_find_delalloc_range(struct inode *inode,
+ ext4_lblk_t lblk_start,
+ ext4_lblk_t lblk_end,
+ int search_hint_reverse)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct buffer_head *head, *bh = NULL;
+ struct page *page;
+ ext4_lblk_t i, pg_lblk;
+ pgoff_t index;
+
+ /* reverse search won't work if fs block size is less than page size */
+ if (inode->i_blkbits < PAGE_CACHE_SHIFT)
+ search_hint_reverse = 0;
+
+ if (search_hint_reverse)
+ i = lblk_end;
+ else
+ i = lblk_start;
+
+ index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+ while ((i >= lblk_start) && (i <= lblk_end)) {
+ page = find_get_page(mapping, index);
+ if (!page)
+ goto nextpage;
+
+ if (!page_has_buffers(page))
+ goto nextpage;
+
+ head = page_buffers(page);
+ if (!head)
+ goto nextpage;
+
+ bh = head;
+ pg_lblk = index << (PAGE_CACHE_SHIFT -
+ inode->i_blkbits);
+ do {
+ if (unlikely(pg_lblk < lblk_start)) {
+ /*
+ * This is possible when fs block size is less
+ * than page size and our cluster starts/ends in
+ * middle of the page. So we need to skip the
+ * initial few blocks till we reach the 'lblk'
+ */
+ pg_lblk++;
+ continue;
+ }
+
+ /* Check if the buffer is delayed-allocated and not yet
+ * mapped. (When da buffers are mapped during their
+ * writeout, their da_mapped bit is set.)
+ */
+ if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
+ page_cache_release(page);
+ trace_ext4_find_delalloc_range(inode,
+ lblk_start, lblk_end,
+ search_hint_reverse,
+ 1, i);
+ return 1;
+ }
+ if (search_hint_reverse)
+ i--;
+ else
+ i++;
+ } while ((i >= lblk_start) && (i <= lblk_end) &&
+ ((bh = bh->b_this_page) != head));
+nextpage:
+ if (page)
+ page_cache_release(page);
+ /*
+ * Move to next page. 'i' will be the first lblk in the next
+ * page.
+ */
+ if (search_hint_reverse)
+ index--;
+ else
+