Diffstat (limited to 'fs/ext3/balloc.c')
-rw-r--r--	fs/ext3/balloc.c	167
1 file changed, 86 insertions, 81 deletions
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 045995c8ce5..158b5d4ce06 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -11,16 +11,9 @@
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  */
 
-#include <linux/time.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/blkdev.h>
+#include "ext3.h"
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -161,6 +154,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
 	desc = ext3_get_group_desc(sb, block_group, NULL);
 	if (!desc)
 		return NULL;
+	trace_ext3_read_block_bitmap(sb, block_group);
 	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
@@ -351,6 +345,7 @@ void ext3_rsv_window_add(struct super_block *sb,
 	struct rb_node * parent = NULL;
 	struct ext3_reserve_window_node *this;
 
+	trace_ext3_rsv_window_add(sb, rsv);
 	while (*p)
 	{
 		parent = *p;
@@ -424,7 +419,7 @@ static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
 void ext3_init_block_alloc_info(struct inode *inode)
 {
 	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
+	struct ext3_block_alloc_info *block_i;
 	struct super_block *sb = inode->i_sb;
 
 	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
@@ -476,8 +471,10 @@ void ext3_discard_reservation(struct inode *inode)
 	rsv = &block_i->rsv_window_node;
 	if (!rsv_is_empty(&rsv->rsv_window)) {
 		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&rsv->rsv_window))
+		if (!rsv_is_empty(&rsv->rsv_window)) {
+			trace_ext3_discard_reservation(inode, rsv);
 			rsv_window_remove(inode->i_sb, rsv);
+		}
 		spin_unlock(rsv_lock);
 	}
 }
@@ -486,7 +483,7 @@ void ext3_discard_reservation(struct inode *inode)
  * ext3_free_blocks_sb() -- Free given blocks and update quota
  * @handle: handle to this transaction
  * @sb: super block
- * @block: start physcial block to free
+ * @block: start physical block to free
  * @count: number of blocks to free
  * @pdquot_freed_blocks: pointer to quota
  */
@@ -590,7 +587,7 @@ do_more:
 			BUFFER_TRACE(debug_bh, "Deleted!");
 			if (!bh2jh(bitmap_bh)->b_committed_data)
 				BUFFER_TRACE(debug_bh,
-					"No commited data in bitmap");
+					"No committed data in bitmap");
 			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
 			__brelse(debug_bh);
 		}
@@ -683,14 +680,10 @@ error_return:
 void ext3_free_blocks(handle_t *handle, struct inode *inode,
 			ext3_fsblk_t block, unsigned long count)
 {
-	struct super_block * sb;
+	struct super_block *sb = inode->i_sb;
 	unsigned long dquot_freed_blocks;
 
-	sb = inode->i_sb;
-	if (!sb) {
-		printk ("ext3_free_blocks: nonexistent device");
-		return;
-	}
+	trace_ext3_free_blocks(inode, block, count);
 	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
 	if (dquot_freed_blocks)
 		dquot_free_block(inode, dquot_freed_blocks);
@@ -1063,7 +1056,7 @@ static int find_next_reservable_window(
 			rsv_window_remove(sb, my_rsv);
 
 	/*
-	 * Let's book the whole avaliable window for now. We will check the
+	 * Let's book the whole available window for now. We will check the
	 * disk bitmap later and then, if there are free blocks then we adjust
 	 * the window size if it's larger than requested.
 	 * Otherwise, we will remove this node from the tree next time
@@ -1136,6 +1129,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
 	else
 		start_block = grp_goal + group_first_block;
 
+	trace_ext3_alloc_new_reservation(sb, start_block);
 	size = my_rsv->rsv_goal_size;
 
 	if (!rsv_is_empty(&my_rsv->rsv_window)) {
@@ -1230,8 +1224,11 @@ retry:
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start &&
+	    start_block <= my_rsv->rsv_end) {
+		trace_ext3_reserved(sb, start_block, my_rsv);
 		return 0;		/* success */
+	}
 	/*
 	 * if the first free bit we found is out of the reservable space
 	 * continue search for next reservable space,
@@ -1435,15 +1432,16 @@ out:
  *
  * Check if filesystem has at least 1 free block available for allocation.
  */
-static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
+static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
 {
 	ext3_fsblk_t free_blocks, root_blocks;
 
 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
 	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
 	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-		sbi->s_resuid != current_fsuid() &&
-		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+		!use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
+		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
+		 !in_group_p (sbi->s_resgid))) {
 		return 0;
 	}
 	return 1;
@@ -1456,14 +1454,14 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
  *
  * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
  * it is profitable to retry the operation, this function will wait
- * for the current or commiting transaction to complete, and then
+ * for the current or committing transaction to complete, and then
  * return TRUE.
  *
  * if the total number of retries exceed three times, return FALSE.
  */
 int ext3_should_retry_alloc(struct super_block *sb, int *retries)
 {
-	if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
+	if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
 		return 0;
 
 	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
@@ -1514,10 +1512,6 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
-	if (!sb) {
-		printk("ext3_new_block: nonexistent device");
-		return 0;
-	}
 
 	/*
 	 * Check quota for allocation of this block.
@@ -1528,8 +1522,10 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
 		return 0;
 	}
 
+	trace_ext3_request_blocks(inode, goal, num);
+
 	sbi = EXT3_SB(sb);
-	es = EXT3_SB(sb)->s_es;
+	es = sbi->s_es;
 	ext3_debug("goal=%lu.\n", goal);
 	/*
 	 * Allocate a block from reservation only when
@@ -1543,7 +1539,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
 	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
 		my_rsv = &block_i->rsv_window_node;
 
-	if (!ext3_has_free_blocks(sbi)) {
+	if (!ext3_has_free_blocks(sbi, IS_NOQUOTA(inode))) {
 		*errp = -ENOSPC;
 		goto out;
 	}
@@ -1632,9 +1628,9 @@ retry_alloc:
 			goto allocated;
 	}
 	/*
-	 * We may end up a bogus ealier ENOSPC error due to
+	 * We may end up a bogus earlier ENOSPC error due to
 	 * filesystem is "full" of reservations, but
-	 * there maybe indeed free blocks avaliable on disk
+	 * there maybe indeed free blocks available on disk
 	 * In this case, we just forget about the reservations
 	 * just do block allocation as without reservations.
 	 */
@@ -1731,17 +1727,21 @@ allocated:
 	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
-	err = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (!fatal)
-		fatal = err;
-
+	fatal = ext3_journal_dirty_metadata(handle, gdp_bh);
 	if (fatal)
 		goto out;
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	dquot_free_block(inode, *count-num);
-	*count = num;
+
+	if (num < *count) {
+		dquot_free_block(inode, *count-num);
+		*count = num;
+	}
+
+	trace_ext3_allocate_blocks(inode, goal, num,
+				(unsigned long long)ret_block);
+
 	return ret_block;
 
 io_error:
@@ -1810,7 +1810,7 @@ ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
 	brelse(bitmap_bh);
 	printk("ext3_count_free_blocks: stored = "E3FSBLK
 		", computed = "E3FSBLK", "E3FSBLK"\n",
-	       le32_to_cpu(es->s_free_blocks_count),
+	       (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
 		desc_count, bitmap_count);
 	return bitmap_count;
 #else
@@ -1917,9 +1917,10 @@ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
  * reaches any used block. Then issue a TRIM command on this extent and free
  * the extent in the block bitmap. This is done until whole group is scanned.
  */
-ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
-				ext3_grpblk_t start, ext3_grpblk_t max,
-				ext3_grpblk_t minblocks)
+static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
+					unsigned int group,
+					ext3_grpblk_t start, ext3_grpblk_t max,
+					ext3_grpblk_t minblocks)
 {
 	handle_t *handle;
 	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
@@ -1962,7 +1963,7 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
 	sbi = EXT3_SB(sb);
 
 	/* Walk through the whole group */
-	while (start < max) {
+	while (start <= max) {
 		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
 		if (start < 0)
 			break;
@@ -1972,7 +1973,7 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
 		 * Allocate contiguous free extents by setting bits in the
 		 * block bitmap
 		 */
-		while (next < max
+		while (next <= max
 			&& claim_block(sb_bgl_lock(sbi, group),
 				next, bitmap_bh)) {
 			next++;
@@ -1991,10 +1992,12 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
 		spin_unlock(sb_bgl_lock(sbi, group));
 		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+		free_blocks -= next - start;
 
 		/* Do not issue a TRIM on extents smaller than minblocks */
 		if ((next - start) < minblocks)
 			goto free_extent;
 
+		trace_ext3_discard_blocks(sb, discard_block, next - start);
 		/* Send the TRIM command down to the device */
 		err = sb_issue_discard(sb, discard_block, next - start,
 				       GFP_NOFS, 0);
@@ -2040,7 +2043,7 @@ free_extent:
 		cond_resched();
 
 		/* No more suitable extents */
-		if ((free_blocks - count) < minblocks)
+		if (free_blocks < minblocks)
 			break;
 	}
 
@@ -2081,73 +2084,75 @@ err_out:
  */
 int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
 {
-	ext3_grpblk_t last_block, first_block, free_blocks;
-	unsigned long first_group, last_group;
-	unsigned long group, ngroups;
+	ext3_grpblk_t last_block, first_block;
+	unsigned long group, first_group, last_group;
 	struct ext3_group_desc *gdp;
 	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	uint64_t start, len, minlen, trimmed;
+	uint64_t start, minlen, end, trimmed = 0;
+	ext3_fsblk_t first_data_blk =
+			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
 	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
-	len = range->len >> sb->s_blocksize_bits;
+	end = start + (range->len >> sb->s_blocksize_bits) - 1;
 	minlen = range->minlen >> sb->s_blocksize_bits;
-	trimmed = 0;
 
-	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+	if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+	    start >= max_blks ||
+	    range->len < sb->s_blocksize)
 		return -EINVAL;
-	if (start >= max_blks)
+	if (end >= max_blks)
+		end = max_blks - 1;
+	if (end <= first_data_blk)
 		goto out;
-	if (start < le32_to_cpu(es->s_first_data_block)) {
-		len -= le32_to_cpu(es->s_first_data_block) - start;
-		start = le32_to_cpu(es->s_first_data_block);
-	}
-	if (start + len > max_blks)
-		len = max_blks - start;
+	if (start < first_data_blk)
+		start = first_data_blk;
 
-	ngroups = EXT3_SB(sb)->s_groups_count;
 	smp_rmb();
 
 	/* Determine first and last group to examine based on start and len */
 	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
 				     &first_group, &first_block);
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
 				     &last_group, &last_block);
-	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
-	last_block = EXT3_BLOCKS_PER_GROUP(sb);
 
-	if (first_group > last_group)
-		return -EINVAL;
+	/* end now represents the last block to discard in this group */
+	end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
 
 	for (group = first_group; group <= last_group; group++) {
 		gdp = ext3_get_group_desc(sb, group, NULL);
 		if (!gdp)
 			break;
 
-		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-		if (free_blocks < minlen)
-			continue;
-
-		if (len >= EXT3_BLOCKS_PER_GROUP(sb))
-			len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
-		else
-			last_block = first_block + len;
+		/*
+		 * For all the groups except the last one, last block will
+		 * always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
+		 * change it for the last group, note that last_block is
+		 * already computed earlier by ext3_get_group_no_and_offset()
+		 */
+		if (group == last_group)
+			end = last_block;
 
-		ret = ext3_trim_all_free(sb, group, first_block,
-					last_block, minlen);
-		if (ret < 0)
-			break;
+		if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
+			ret = ext3_trim_all_free(sb, group, first_block,
+						 end, minlen);
+			if (ret < 0)
+				break;
+			trimmed += ret;
+		}
 
-		trimmed += ret;
+		/*
+		 * For every group except the first one, we are sure
+		 * that the first block to discard will be block #0.
+		 */
 		first_block = 0;
 	}
 
-	if (ret >= 0)
+	if (ret > 0)
 		ret = 0;
 
 out:
 	range->len = trimmed * sb->s_blocksize;
-
 	return ret;
 }
