Diffstat (limited to 'fs/ext4/resize.c')
 fs/ext4/resize.c (-rw-r--r--) | 1721
 1 file changed, 1349 insertions(+), 372 deletions(-)
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index dc963929de6..bb0e80f03e2 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -16,6 +16,57 @@  #include "ext4_jbd2.h" +int ext4_resize_begin(struct super_block *sb) +{ +	int ret = 0; + +	if (!capable(CAP_SYS_RESOURCE)) +		return -EPERM; + +	/* +	 * We are not allowed to do online-resizing on a filesystem mounted +	 * with error, because it can destroy the filesystem easily. +	 */ +	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { +		ext4_warning(sb, "There are errors in the filesystem, " +			     "so online resizing is not allowed\n"); +		return -EPERM; +	} + +	if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags)) +		ret = -EBUSY; + +	return ret; +} + +void ext4_resize_end(struct super_block *sb) +{ +	clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); +	smp_mb__after_atomic(); +} + +static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, +					     ext4_group_t group) { +	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << +	       EXT4_DESC_PER_BLOCK_BITS(sb); +} + +static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb, +					     ext4_group_t group) { +	group = ext4_meta_bg_first_group(sb, group); +	return ext4_group_first_block_no(sb, group); +} + +static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, +						ext4_group_t group) { +	ext4_grpblk_t overhead; +	overhead = ext4_bg_num_gdb(sb, group); +	if (ext4_bg_has_super(sb, group)) +		overhead += 1 + +			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); +	return overhead; +} +  #define outside(b, first, last)	((b) < (first) || (b) >= (last))  #define inside(b, first, last)	((b) >= (first) && (b) < (last)) @@ -28,14 +79,20 @@ static int verify_group_input(struct super_block *sb,  	ext4_fsblk_t end = start + input->blocks_count;  	ext4_group_t group = input->group;  	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; -	unsigned overhead = ext4_bg_has_super(sb, group) ? -		(1 + ext4_bg_num_gdb(sb, group) + -		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; -	ext4_fsblk_t metaend = start + overhead; +	unsigned overhead; +	ext4_fsblk_t metaend;  	struct buffer_head *bh = NULL;  	ext4_grpblk_t free_blocks_count, offset;  	int err = -EINVAL; +	if (group != sbi->s_groups_count) { +		ext4_warning(sb, "Cannot add at group %u (only %u groups)", +			     input->group, sbi->s_groups_count); +		return -EINVAL; +	} + +	overhead = ext4_group_overhead_blocks(sb, group); +	metaend = start + overhead;  	input->free_blocks_count = free_blocks_count =  		input->blocks_count - 2 - overhead - sbi->s_itb_per_group; @@ -47,10 +104,7 @@ static int verify_group_input(struct super_block *sb,  		       free_blocks_count, input->reserved_blocks);  	ext4_get_group_no_and_offset(sb, start, NULL, &offset); -	if (group != sbi->s_groups_count) -		ext4_warning(sb, "Cannot add at group %u (only %u groups)", -			     input->group, sbi->s_groups_count); -	else if (offset != 0) +	if (offset != 0)  			ext4_warning(sb, "Last group not full");  	else if (input->reserved_blocks > input->blocks_count / 5)  		ext4_warning(sb, "Reserved blocks too high (%u)", @@ -105,6 +159,186 @@ static int verify_group_input(struct super_block *sb,  	return err;  } +/* + * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex + * group each time. 
+ */ +struct ext4_new_flex_group_data { +	struct ext4_new_group_data *groups;	/* new_group_data for groups +						   in the flex group */ +	__u16 *bg_flags;			/* block group flags of groups +						   in @groups */ +	ext4_group_t count;			/* number of groups in @groups +						 */ +}; + +/* + * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of + * @flexbg_size. + * + * Returns NULL on failure otherwise address of the allocated structure. + */ +static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) +{ +	struct ext4_new_flex_group_data *flex_gd; + +	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); +	if (flex_gd == NULL) +		goto out3; + +	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) +		goto out2; +	flex_gd->count = flexbg_size; + +	flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) * +				  flexbg_size, GFP_NOFS); +	if (flex_gd->groups == NULL) +		goto out2; + +	flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS); +	if (flex_gd->bg_flags == NULL) +		goto out1; + +	return flex_gd; + +out1: +	kfree(flex_gd->groups); +out2: +	kfree(flex_gd); +out3: +	return NULL; +} + +static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) +{ +	kfree(flex_gd->bg_flags); +	kfree(flex_gd->groups); +	kfree(flex_gd); +} + +/* + * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps + * and inode tables for a flex group. + * + * This function is used by 64bit-resize.  Note that this function allocates + * group tables from the 1st group of groups contained by @flexgd, which may + * be a partial of a flex group. + * + * @sb: super block of fs to which the groups belongs + * + * Returns 0 on a successful allocation of the metadata blocks in the + * block group. + */ +static int ext4_alloc_group_tables(struct super_block *sb, +				struct ext4_new_flex_group_data *flex_gd, +				int flexbg_size) +{ +	struct ext4_new_group_data *group_data = flex_gd->groups; +	ext4_fsblk_t start_blk; +	ext4_fsblk_t last_blk; +	ext4_group_t src_group; +	ext4_group_t bb_index = 0; +	ext4_group_t ib_index = 0; +	ext4_group_t it_index = 0; +	ext4_group_t group; +	ext4_group_t last_group; +	unsigned overhead; +	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; + +	BUG_ON(flex_gd->count == 0 || group_data == NULL); + +	src_group = group_data[0].group; +	last_group  = src_group + flex_gd->count - 1; + +	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != +	       (last_group & ~(flexbg_size - 1)))); +next_group: +	group = group_data[0].group; +	if (src_group >= group_data[0].group + flex_gd->count) +		return -ENOSPC; +	start_blk = ext4_group_first_block_no(sb, src_group); +	last_blk = start_blk + group_data[src_group - group].blocks_count; + +	overhead = ext4_group_overhead_blocks(sb, src_group); + +	start_blk += overhead; + +	/* We collect contiguous blocks as much as possible. 
*/ +	src_group++; +	for (; src_group <= last_group; src_group++) { +		overhead = ext4_group_overhead_blocks(sb, src_group); +		if (overhead == 0) +			last_blk += group_data[src_group - group].blocks_count; +		else +			break; +	} + +	/* Allocate block bitmaps */ +	for (; bb_index < flex_gd->count; bb_index++) { +		if (start_blk >= last_blk) +			goto next_group; +		group_data[bb_index].block_bitmap = start_blk++; +		group = ext4_get_group_number(sb, start_blk - 1); +		group -= group_data[0].group; +		group_data[group].free_blocks_count--; +		flex_gd->bg_flags[group] &= uninit_mask; +	} + +	/* Allocate inode bitmaps */ +	for (; ib_index < flex_gd->count; ib_index++) { +		if (start_blk >= last_blk) +			goto next_group; +		group_data[ib_index].inode_bitmap = start_blk++; +		group = ext4_get_group_number(sb, start_blk - 1); +		group -= group_data[0].group; +		group_data[group].free_blocks_count--; +		flex_gd->bg_flags[group] &= uninit_mask; +	} + +	/* Allocate inode tables */ +	for (; it_index < flex_gd->count; it_index++) { +		unsigned int itb = EXT4_SB(sb)->s_itb_per_group; +		ext4_fsblk_t next_group_start; + +		if (start_blk + itb > last_blk) +			goto next_group; +		group_data[it_index].inode_table = start_blk; +		group = ext4_get_group_number(sb, start_blk); +		next_group_start = ext4_group_first_block_no(sb, group + 1); +		group -= group_data[0].group; + +		if (start_blk + itb > next_group_start) { +			flex_gd->bg_flags[group + 1] &= uninit_mask; +			overhead = start_blk + itb - next_group_start; +			group_data[group + 1].free_blocks_count -= overhead; +			itb -= overhead; +		} + +		group_data[group].free_blocks_count -= itb; +		flex_gd->bg_flags[group] &= uninit_mask; +		start_blk += EXT4_SB(sb)->s_itb_per_group; +	} + +	if (test_opt(sb, DEBUG)) { +		int i; +		group = group_data[0].group; + +		printk(KERN_DEBUG "EXT4-fs: adding a flex group with " +		       "%d groups, flexbg size is %d:\n", flex_gd->count, +		       flexbg_size); + +		for (i = 0; i < flex_gd->count; i++) { +			printk(KERN_DEBUG "adding %s group %u: %u " +			       "blocks (%d free)\n", +			       ext4_bg_has_super(sb, group + i) ? "normal" : +			       "no-super", group + i, +			       group_data[i].blocks_count, +			       group_data[i].free_blocks_count); +		} +	} +	return 0; +} +  static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,  				  ext4_fsblk_t blk)  { @@ -112,16 +346,15 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,  	int err;  	bh = sb_getblk(sb, blk); -	if (!bh) -		return ERR_PTR(-EIO); +	if (unlikely(!bh)) +		return ERR_PTR(-ENOMEM); +	BUFFER_TRACE(bh, "get_write_access");  	if ((err = ext4_journal_get_write_access(handle, bh))) {  		brelse(bh);  		bh = ERR_PTR(err);  	} else { -		lock_buffer(bh);  		memset(bh->b_data, 0, sb->s_blocksize);  		set_buffer_uptodate(bh); -		unlock_buffer(bh);  	}  	return bh; @@ -132,8 +365,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,   * If that fails, restart the transaction & regain write access for the   * buffer head which is used for block_bitmap modifications.   
*/ -static int extend_or_restart_transaction(handle_t *handle, int thresh, -					 struct buffer_head *bh) +static int extend_or_restart_transaction(handle_t *handle, int thresh)  {  	int err; @@ -144,134 +376,277 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,  	if (err < 0)  		return err;  	if (err) { -		if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) +		err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA); +		if (err)  			return err; -		if ((err = ext4_journal_get_write_access(handle, bh))) +	} + +	return 0; +} + +/* + * set_flexbg_block_bitmap() mark @count blocks starting from @block used. + * + * Helper function for ext4_setup_new_group_blocks() which set . + * + * @sb: super block + * @handle: journal handle + * @flex_gd: flex group data + */ +static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, +			struct ext4_new_flex_group_data *flex_gd, +			ext4_fsblk_t block, ext4_group_t count) +{ +	ext4_group_t count2; + +	ext4_debug("mark blocks [%llu/%u] used\n", block, count); +	for (count2 = count; count > 0; count -= count2, block += count2) { +		ext4_fsblk_t start; +		struct buffer_head *bh; +		ext4_group_t group; +		int err; + +		group = ext4_get_group_number(sb, block); +		start = ext4_group_first_block_no(sb, group); +		group -= flex_gd->groups[0].group; + +		count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); +		if (count2 > count) +			count2 = count; + +		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { +			BUG_ON(flex_gd->count > 1); +			continue; +		} + +		err = extend_or_restart_transaction(handle, 1); +		if (err)  			return err; + +		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); +		if (unlikely(!bh)) +			return -ENOMEM; + +		BUFFER_TRACE(bh, "get_write_access"); +		err = ext4_journal_get_write_access(handle, bh); +		if (err) +			return err; +		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block, +			   block - start, count2); +		ext4_set_bits(bh->b_data, block - start, count2); + +		err = ext4_handle_dirty_metadata(handle, NULL, bh); +		if (unlikely(err)) +			return err; +		brelse(bh);  	}  	return 0;  }  /* - * Set up the block and inode bitmaps, and the inode table for the new group. + * Set up the block and inode bitmaps, and the inode table for the new groups.   * This doesn't need to be part of the main transaction, since we are only   * changing blocks outside the actual filesystem.  We still do journaling to   * ensure the recovery is correct in case of a failure just after resize.   * If any part of this fails, we simply abort the resize. + * + * setup_new_flex_group_blocks handles a flex group as follow: + *  1. copy super block and GDT, and initialize group tables if necessary. + *     In this step, we only set bits in blocks bitmaps for blocks taken by + *     super block and GDT. + *  2. allocate group tables in block bitmaps, that is, set bits in block + *     bitmap for blocks taken by group tables.   */ -static int setup_new_group_blocks(struct super_block *sb, -				  struct ext4_new_group_data *input) +static int setup_new_flex_group_blocks(struct super_block *sb, +				struct ext4_new_flex_group_data *flex_gd)  { +	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; +	ext4_fsblk_t start; +	ext4_fsblk_t block;  	struct ext4_sb_info *sbi = EXT4_SB(sb); -	ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group); -	int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 
-		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0; -	unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group); -	struct buffer_head *bh; +	struct ext4_super_block *es = sbi->s_es; +	struct ext4_new_group_data *group_data = flex_gd->groups; +	__u16 *bg_flags = flex_gd->bg_flags;  	handle_t *handle; -	ext4_fsblk_t block; -	ext4_grpblk_t bit; -	int i; -	int err = 0, err2; +	ext4_group_t group, count; +	struct buffer_head *bh = NULL; +	int reserved_gdb, i, j, err = 0, err2; +	int meta_bg; -	/* This transaction may be extended/restarted along the way */ -	handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); +	BUG_ON(!flex_gd->count || !group_data || +	       group_data[0].group != sbi->s_groups_count); +	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); +	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); + +	/* This transaction may be extended/restarted along the way */ +	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);  	if (IS_ERR(handle))  		return PTR_ERR(handle); -	mutex_lock(&sbi->s_resize_lock); -	if (input->group != sbi->s_groups_count) { -		err = -EBUSY; -		goto exit_journal; -	} +	group = group_data[0].group; +	for (i = 0; i < flex_gd->count; i++, group++) { +		unsigned long gdblocks; +		ext4_grpblk_t overhead; -	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) { -		err = PTR_ERR(bh); -		goto exit_journal; -	} +		gdblocks = ext4_bg_num_gdb(sb, group); +		start = ext4_group_first_block_no(sb, group); -	if (ext4_bg_has_super(sb, input->group)) { -		ext4_debug("mark backup superblock %#04llx (+0)\n", start); -		ext4_set_bit(0, bh->b_data); -	} +		if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) +			goto handle_itb; -	/* Copy all of the GDT blocks into the backup in this group */ -	for (i = 0, bit = 1, block = start + 1; -	     i < gdblocks; i++, block++, bit++) { -		struct buffer_head *gdb; +		if (meta_bg == 1) { +			ext4_group_t first_group; +			first_group = ext4_meta_bg_first_group(sb, group); +			if (first_group != group + 1 && +			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1) +				goto handle_itb; +		} -		ext4_debug("update backup group %#04llx (+%d)\n", block, bit); +		block = start + ext4_bg_has_super(sb, group); +		/* Copy all of the GDT blocks into the backup in this group */ +		for (j = 0; j < gdblocks; j++, block++) { +			struct buffer_head *gdb; + +			ext4_debug("update backup group %#04llx\n", block); +			err = extend_or_restart_transaction(handle, 1); +			if (err) +				goto out; + +			gdb = sb_getblk(sb, block); +			if (unlikely(!gdb)) { +				err = -ENOMEM; +				goto out; +			} + +			BUFFER_TRACE(gdb, "get_write_access"); +			err = ext4_journal_get_write_access(handle, gdb); +			if (err) { +				brelse(gdb); +				goto out; +			} +			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, +			       gdb->b_size); +			set_buffer_uptodate(gdb); + +			err = ext4_handle_dirty_metadata(handle, NULL, gdb); +			if (unlikely(err)) { +				brelse(gdb); +				goto out; +			} +			brelse(gdb); +		} -		if ((err = extend_or_restart_transaction(handle, 1, bh))) -			goto exit_bh; +		/* Zero out all of the reserved backup group descriptor +		 * table blocks +		 */ +		if (ext4_bg_has_super(sb, group)) { +			err = sb_issue_zeroout(sb, gdblocks + start + 1, +					reserved_gdb, GFP_NOFS); +			if (err) +				goto out; +		} -		gdb = sb_getblk(sb, block); -		if (!gdb) { -			err = -EIO; -			goto exit_bh; +handle_itb: +		/* Initialize group tables of the grop @group */ +		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) +			goto 
handle_bb; + +		/* Zero out all of the inode table blocks */ +		block = group_data[i].inode_table; +		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", +			   block, sbi->s_itb_per_group); +		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, +				       GFP_NOFS); +		if (err) +			goto out; + +handle_bb: +		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) +			goto handle_ib; + +		/* Initialize block bitmap of the @group */ +		block = group_data[i].block_bitmap; +		err = extend_or_restart_transaction(handle, 1); +		if (err) +			goto out; + +		bh = bclean(handle, sb, block); +		if (IS_ERR(bh)) { +			err = PTR_ERR(bh); +			goto out;  		} -		if ((err = ext4_journal_get_write_access(handle, gdb))) { -			brelse(gdb); -			goto exit_bh; +		overhead = ext4_group_overhead_blocks(sb, group); +		if (overhead != 0) { +			ext4_debug("mark backup superblock %#04llx (+0)\n", +				   start); +			ext4_set_bits(bh->b_data, 0, overhead);  		} -		lock_buffer(gdb); -		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); -		set_buffer_uptodate(gdb); -		unlock_buffer(gdb); -		ext4_handle_dirty_metadata(handle, NULL, gdb); -		ext4_set_bit(bit, bh->b_data); -		brelse(gdb); -	} - -	/* Zero out all of the reserved backup group descriptor table blocks */ -	ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", -			block, sbi->s_itb_per_group); -	err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb, -			       GFP_NOFS); -	if (err) -		goto exit_bh; +		ext4_mark_bitmap_end(group_data[i].blocks_count, +				     sb->s_blocksize * 8, bh->b_data); +		err = ext4_handle_dirty_metadata(handle, NULL, bh); +		if (err) +			goto out; +		brelse(bh); -	ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap, -		   input->block_bitmap - start); -	ext4_set_bit(input->block_bitmap - start, bh->b_data); -	ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input->inode_bitmap, -		   input->inode_bitmap - start); -	ext4_set_bit(input->inode_bitmap - start, bh->b_data); - -	/* Zero out all of the inode table blocks */ -	block = input->inode_table; -	ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", -			block, sbi->s_itb_per_group); -	err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); -	if (err) -		goto exit_bh; +handle_ib: +		if (bg_flags[i] & EXT4_BG_INODE_UNINIT) +			continue; + +		/* Initialize inode bitmap of the @group */ +		block = group_data[i].inode_bitmap; +		err = extend_or_restart_transaction(handle, 1); +		if (err) +			goto out; +		/* Mark unused entries in inode bitmap used */ +		bh = bclean(handle, sb, block); +		if (IS_ERR(bh)) { +			err = PTR_ERR(bh); +			goto out; +		} -	if ((err = extend_or_restart_transaction(handle, 2, bh))) -		goto exit_bh; +		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), +				     sb->s_blocksize * 8, bh->b_data); +		err = ext4_handle_dirty_metadata(handle, NULL, bh); +		if (err) +			goto out; +		brelse(bh); +	} +	bh = NULL; + +	/* Mark group tables in block bitmap */ +	for (j = 0; j < GROUP_TABLE_COUNT; j++) { +		count = group_table_count[j]; +		start = (&group_data[0].block_bitmap)[j]; +		block = start; +		for (i = 1; i < flex_gd->count; i++) { +			block += group_table_count[j]; +			if (block == (&group_data[i].block_bitmap)[j]) { +				count += group_table_count[j]; +				continue; +			} +			err = set_flexbg_block_bitmap(sb, handle, +						flex_gd, start, count); +			if (err) +				goto out; +			count = group_table_count[j]; +			start = (&group_data[i].block_bitmap)[j]; +			block = start; +		} -	
ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, -			     bh->b_data); -	ext4_handle_dirty_metadata(handle, NULL, bh); -	brelse(bh); -	/* Mark unused entries in inode bitmap used */ -	ext4_debug("clear inode bitmap %#04llx (+%llu)\n", -		   input->inode_bitmap, input->inode_bitmap - start); -	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) { -		err = PTR_ERR(bh); -		goto exit_journal; +		if (count) { +			err = set_flexbg_block_bitmap(sb, handle, +						flex_gd, start, count); +			if (err) +				goto out; +		}  	} -	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, -			     bh->b_data); -	ext4_handle_dirty_metadata(handle, NULL, bh); -exit_bh: +out:  	brelse(bh); - -exit_journal: -	mutex_unlock(&sbi->s_resize_lock); -	if ((err2 = ext4_journal_stop(handle)) && !err) +	err2 = ext4_journal_stop(handle); +	if (err2 && !err)  		err = err2;  	return err; @@ -319,10 +694,10 @@ static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,   * groups in current filesystem that have BACKUPS, or -ve error code.   */  static int verify_reserved_gdb(struct super_block *sb, +			       ext4_group_t end,  			       struct buffer_head *primary)  {  	const ext4_fsblk_t blk = primary->b_blocknr; -	const ext4_group_t end = EXT4_SB(sb)->s_groups_count;  	unsigned three = 1;  	unsigned five = 5;  	unsigned seven = 7; @@ -362,15 +737,15 @@ static int verify_reserved_gdb(struct super_block *sb,   * fail once we start modifying the data on disk, because JBD has no rollback.   */  static int add_new_gdb(handle_t *handle, struct inode *inode, -		       struct ext4_new_group_data *input, -		       struct buffer_head **primary) +		       ext4_group_t group)  {  	struct super_block *sb = inode->i_sb;  	struct ext4_super_block *es = EXT4_SB(sb)->s_es; -	unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); +	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);  	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;  	struct buffer_head **o_group_desc, **n_group_desc;  	struct buffer_head *dind; +	struct buffer_head *gdb_bh;  	int gdbackups;  	struct ext4_iloc iloc;  	__le32 *data; @@ -393,11 +768,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,  		return -EPERM;  	} -	*primary = sb_bread(sb, gdblock); -	if (!*primary) +	gdb_bh = sb_bread(sb, gdblock); +	if (!gdb_bh)  		return -EIO; -	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) { +	gdbackups = verify_reserved_gdb(sb, group, gdb_bh); +	if (gdbackups < 0) {  		err = gdbackups;  		goto exit_bh;  	} @@ -412,30 +788,38 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,  	data = (__le32 *)dind->b_data;  	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {  		ext4_warning(sb, "new group %u GDT block %llu not reserved", -			     input->group, gdblock); +			     group, gdblock);  		err = -EINVAL;  		goto exit_dind;  	} -	if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh))) +	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); +	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); +	if (unlikely(err))  		goto exit_dind; -	if ((err = ext4_journal_get_write_access(handle, *primary))) -		goto exit_sbh; +	BUFFER_TRACE(gdb_bh, "get_write_access"); +	err = ext4_journal_get_write_access(handle, gdb_bh); +	if (unlikely(err)) +		goto exit_dind; -	if ((err = ext4_journal_get_write_access(handle, dind))) -		goto exit_primary; +	BUFFER_TRACE(dind, "get_write_access"); +	err = ext4_journal_get_write_access(handle, 
dind); +	if (unlikely(err)) +		ext4_std_error(sb, err);  	/* ext4_reserve_inode_write() gets a reference on the iloc */ -	if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) -		goto exit_dindj; +	err = ext4_reserve_inode_write(handle, inode, &iloc); +	if (unlikely(err)) +		goto exit_dind; -	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *), -			GFP_NOFS); +	n_group_desc = ext4_kvmalloc((gdb_num + 1) * +				     sizeof(struct buffer_head *), +				     GFP_NOFS);  	if (!n_group_desc) {  		err = -ENOMEM; -		ext4_warning(sb, -			      "not enough memory for %lu groups", gdb_num + 1); +		ext4_warning(sb, "not enough memory for %lu groups", +			     gdb_num + 1);  		goto exit_inode;  	} @@ -449,45 +833,89 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,  	 * reserved inode, and will become GDT blocks (primary and backup).  	 */  	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; -	ext4_handle_dirty_metadata(handle, NULL, dind); -	brelse(dind); +	err = ext4_handle_dirty_metadata(handle, NULL, dind); +	if (unlikely(err)) { +		ext4_std_error(sb, err); +		goto exit_inode; +	}  	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;  	ext4_mark_iloc_dirty(handle, inode, &iloc); -	memset((*primary)->b_data, 0, sb->s_blocksize); -	ext4_handle_dirty_metadata(handle, NULL, *primary); +	memset(gdb_bh->b_data, 0, sb->s_blocksize); +	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); +	if (unlikely(err)) { +		ext4_std_error(sb, err); +		goto exit_inode; +	} +	brelse(dind);  	o_group_desc = EXT4_SB(sb)->s_group_desc;  	memcpy(n_group_desc, o_group_desc,  	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); -	n_group_desc[gdb_num] = *primary; +	n_group_desc[gdb_num] = gdb_bh;  	EXT4_SB(sb)->s_group_desc = n_group_desc;  	EXT4_SB(sb)->s_gdb_count++; -	kfree(o_group_desc); +	ext4_kvfree(o_group_desc);  	le16_add_cpu(&es->s_reserved_gdt_blocks, -1); -	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); +	err = ext4_handle_dirty_super(handle, sb); +	if (err) +		ext4_std_error(sb, err); -	return 0; +	return err;  exit_inode: -	/* ext4_journal_release_buffer(handle, iloc.bh); */ +	ext4_kvfree(n_group_desc);  	brelse(iloc.bh); -exit_dindj: -	/* ext4_journal_release_buffer(handle, dind); */ -exit_primary: -	/* ext4_journal_release_buffer(handle, *primary); */ -exit_sbh: -	/* ext4_journal_release_buffer(handle, *primary); */  exit_dind:  	brelse(dind);  exit_bh: -	brelse(*primary); +	brelse(gdb_bh);  	ext4_debug("leaving with error %d\n", err);  	return err;  }  /* + * add_new_gdb_meta_bg is the sister of add_new_gdb. 
+ */ +static int add_new_gdb_meta_bg(struct super_block *sb, +			       handle_t *handle, ext4_group_t group) { +	ext4_fsblk_t gdblock; +	struct buffer_head *gdb_bh; +	struct buffer_head **o_group_desc, **n_group_desc; +	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); +	int err; + +	gdblock = ext4_meta_bg_first_block_no(sb, group) + +		   ext4_bg_has_super(sb, group); +	gdb_bh = sb_bread(sb, gdblock); +	if (!gdb_bh) +		return -EIO; +	n_group_desc = ext4_kvmalloc((gdb_num + 1) * +				     sizeof(struct buffer_head *), +				     GFP_NOFS); +	if (!n_group_desc) { +		err = -ENOMEM; +		ext4_warning(sb, "not enough memory for %lu groups", +			     gdb_num + 1); +		return err; +	} + +	o_group_desc = EXT4_SB(sb)->s_group_desc; +	memcpy(n_group_desc, o_group_desc, +	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); +	n_group_desc[gdb_num] = gdb_bh; +	EXT4_SB(sb)->s_group_desc = n_group_desc; +	EXT4_SB(sb)->s_gdb_count++; +	ext4_kvfree(o_group_desc); +	BUFFER_TRACE(gdb_bh, "get_write_access"); +	err = ext4_journal_get_write_access(handle, gdb_bh); +	if (unlikely(err)) +		brelse(gdb_bh); +	return err; +} + +/*   * Called when we are adding a new group which has a backup copy of each of   * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.   * We need to add these reserved backup GDT blocks to the resize inode, so @@ -501,7 +929,7 @@ exit_bh:   * backup GDT blocks are stored in their reserved primary GDT block.   */  static int reserve_backup_gdb(handle_t *handle, struct inode *inode, -			      struct ext4_new_group_data *input) +			      ext4_group_t group)  {  	struct super_block *sb = inode->i_sb;  	int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); @@ -545,7 +973,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,  			err = -EIO;  			goto exit_bh;  		} -		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) { +		gdbackups = verify_reserved_gdb(sb, group, primary[res]); +		if (gdbackups < 0) {  			brelse(primary[res]);  			err = gdbackups;  			goto exit_bh; @@ -555,14 +984,9 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,  	}  	for (i = 0; i < reserved_gdb; i++) { -		if ((err = ext4_journal_get_write_access(handle, primary[i]))) { -			/* -			int j; -			for (j = 0; j < i; j++) -				ext4_journal_release_buffer(handle, primary[j]); -			 */ +		BUFFER_TRACE(primary[i], "get_write_access"); +		if ((err = ext4_journal_get_write_access(handle, primary[i])))  			goto exit_bh; -		}  	}  	if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) @@ -572,7 +996,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,  	 * Finally we can add each of the reserved backup GDT blocks from  	 * the new group to its reserved primary GDT block.  	 */ -	blk = input->group * EXT4_BLOCKS_PER_GROUP(sb); +	blk = group * EXT4_BLOCKS_PER_GROUP(sb);  	for (i = 0; i < reserved_gdb; i++) {  		int err2;  		data = (__le32 *)primary[i]->b_data; @@ -614,29 +1038,38 @@ exit_free:   * do not copy the full number of backups at this time.  The resize   * which changed s_groups_count will backup again.   
*/ -static void update_backups(struct super_block *sb, -			   int blk_off, char *data, int size) +static void update_backups(struct super_block *sb, int blk_off, char *data, +			   int size, int meta_bg)  {  	struct ext4_sb_info *sbi = EXT4_SB(sb); -	const ext4_group_t last = sbi->s_groups_count; +	ext4_group_t last;  	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);  	unsigned three = 1;  	unsigned five = 5;  	unsigned seven = 7; -	ext4_group_t group; +	ext4_group_t group = 0;  	int rest = sb->s_blocksize - size;  	handle_t *handle;  	int err = 0, err2; -	handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); +	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);  	if (IS_ERR(handle)) {  		group = 1;  		err = PTR_ERR(handle);  		goto exit_err;  	} -	while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) { +	if (meta_bg == 0) { +		group = ext4_list_backups(sb, &three, &five, &seven); +		last = sbi->s_groups_count; +	} else { +		group = ext4_meta_bg_first_group(sb, group) + 1; +		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); +	} + +	while (group < sbi->s_groups_count) {  		struct buffer_head *bh; +		ext4_fsblk_t backup_block;  		/* Out of journal space, and can't get more - abort - so sad */  		if (ext4_handle_valid(handle) && @@ -645,13 +1078,21 @@ static void update_backups(struct super_block *sb,  		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))  			break; -		bh = sb_getblk(sb, group * bpg + blk_off); -		if (!bh) { -			err = -EIO; +		if (meta_bg == 0) +			backup_block = group * bpg + blk_off; +		else +			backup_block = (ext4_group_first_block_no(sb, group) + +					ext4_bg_has_super(sb, group)); + +		bh = sb_getblk(sb, backup_block); +		if (unlikely(!bh)) { +			err = -ENOMEM;  			break;  		} -		ext4_debug("update metadata backup %#04lx\n", -			  (unsigned long)bh->b_blocknr); +		ext4_debug("update metadata backup %llu(+%llu)\n", +			   backup_block, backup_block - +			   ext4_group_first_block_no(sb, group)); +		BUFFER_TRACE(bh, "get_write_access");  		if ((err = ext4_journal_get_write_access(handle, bh)))  			break;  		lock_buffer(bh); @@ -660,8 +1101,17 @@ static void update_backups(struct super_block *sb,  			memset(bh->b_data + size, 0, rest);  		set_buffer_uptodate(bh);  		unlock_buffer(bh); -		ext4_handle_dirty_metadata(handle, NULL, bh); +		err = ext4_handle_dirty_metadata(handle, NULL, bh); +		if (unlikely(err)) +			ext4_std_error(sb, err);  		brelse(bh); + +		if (meta_bg == 0) +			group = ext4_list_backups(sb, &three, &five, &seven); +		else if (group == last) +			break; +		else +			group = last;  	}  	if ((err2 = ext4_journal_stop(handle)) && !err)  		err = err2; @@ -686,6 +1136,424 @@ exit_err:  	}  } +/* + * ext4_add_new_descs() adds @count group descriptor of groups + * starting at @group + * + * @handle: journal handle + * @sb: super block + * @group: the group no. of the first group desc to be added + * @resize_inode: the resize inode + * @count: number of group descriptors to be added + */ +static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, +			      ext4_group_t group, struct inode *resize_inode, +			      ext4_group_t count) +{ +	struct ext4_sb_info *sbi = EXT4_SB(sb); +	struct ext4_super_block *es = sbi->s_es; +	struct buffer_head *gdb_bh; +	int i, gdb_off, gdb_num, err = 0; +	int meta_bg; + +	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG); +	for (i = 0; i < count; i++, group++) { +		int reserved_gdb = ext4_bg_has_super(sb, group) ? 
+			le16_to_cpu(es->s_reserved_gdt_blocks) : 0; + +		gdb_off = group % EXT4_DESC_PER_BLOCK(sb); +		gdb_num = group / EXT4_DESC_PER_BLOCK(sb); + +		/* +		 * We will only either add reserved group blocks to a backup group +		 * or remove reserved blocks for the first group in a new group block. +		 * Doing both would be mean more complex code, and sane people don't +		 * use non-sparse filesystems anymore.  This is already checked above. +		 */ +		if (gdb_off) { +			gdb_bh = sbi->s_group_desc[gdb_num]; +			BUFFER_TRACE(gdb_bh, "get_write_access"); +			err = ext4_journal_get_write_access(handle, gdb_bh); + +			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) +				err = reserve_backup_gdb(handle, resize_inode, group); +		} else if (meta_bg != 0) { +			err = add_new_gdb_meta_bg(sb, handle, group); +		} else { +			err = add_new_gdb(handle, resize_inode, group); +		} +		if (err) +			break; +	} +	return err; +} + +static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) +{ +	struct buffer_head *bh = sb_getblk(sb, block); +	if (unlikely(!bh)) +		return NULL; +	if (!bh_uptodate_or_lock(bh)) { +		if (bh_submit_read(bh) < 0) { +			brelse(bh); +			return NULL; +		} +	} + +	return bh; +} + +static int ext4_set_bitmap_checksums(struct super_block *sb, +				     ext4_group_t group, +				     struct ext4_group_desc *gdp, +				     struct ext4_new_group_data *group_data) +{ +	struct buffer_head *bh; + +	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) +		return 0; + +	bh = ext4_get_bitmap(sb, group_data->inode_bitmap); +	if (!bh) +		return -EIO; +	ext4_inode_bitmap_csum_set(sb, group, gdp, bh, +				   EXT4_INODES_PER_GROUP(sb) / 8); +	brelse(bh); + +	bh = ext4_get_bitmap(sb, group_data->block_bitmap); +	if (!bh) +		return -EIO; +	ext4_block_bitmap_csum_set(sb, group, gdp, bh); +	brelse(bh); + +	return 0; +} + +/* + * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg + */ +static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, +				struct ext4_new_flex_group_data *flex_gd) +{ +	struct ext4_new_group_data	*group_data = flex_gd->groups; +	struct ext4_group_desc		*gdp; +	struct ext4_sb_info		*sbi = EXT4_SB(sb); +	struct buffer_head		*gdb_bh; +	ext4_group_t			group; +	__u16				*bg_flags = flex_gd->bg_flags; +	int				i, gdb_off, gdb_num, err = 0; +	 + +	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { +		group = group_data->group; + +		gdb_off = group % EXT4_DESC_PER_BLOCK(sb); +		gdb_num = group / EXT4_DESC_PER_BLOCK(sb); + +		/* +		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 
+		 */ +		gdb_bh = sbi->s_group_desc[gdb_num]; +		/* Update group descriptor block for new group */ +		gdp = (struct ext4_group_desc *)(gdb_bh->b_data + +						 gdb_off * EXT4_DESC_SIZE(sb)); + +		memset(gdp, 0, EXT4_DESC_SIZE(sb)); +		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); +		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); +		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data); +		if (err) { +			ext4_std_error(sb, err); +			break; +		} + +		ext4_inode_table_set(sb, gdp, group_data->inode_table); +		ext4_free_group_clusters_set(sb, gdp, +			EXT4_NUM_B2C(sbi, group_data->free_blocks_count)); +		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); +		if (ext4_has_group_desc_csum(sb)) +			ext4_itable_unused_set(sb, gdp, +					       EXT4_INODES_PER_GROUP(sb)); +		gdp->bg_flags = cpu_to_le16(*bg_flags); +		ext4_group_desc_csum_set(sb, group, gdp); + +		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); +		if (unlikely(err)) { +			ext4_std_error(sb, err); +			break; +		} + +		/* +		 * We can allocate memory for mb_alloc based on the new group +		 * descriptor +		 */ +		err = ext4_mb_add_groupinfo(sb, group, gdp); +		if (err) +			break; +	} +	return err; +} + +/* + * ext4_update_super() updates the super block so that the newly added + * groups can be seen by the filesystem. + * + * @sb: super block + * @flex_gd: new added groups + */ +static void ext4_update_super(struct super_block *sb, +			     struct ext4_new_flex_group_data *flex_gd) +{ +	ext4_fsblk_t blocks_count = 0; +	ext4_fsblk_t free_blocks = 0; +	ext4_fsblk_t reserved_blocks = 0; +	struct ext4_new_group_data *group_data = flex_gd->groups; +	struct ext4_sb_info *sbi = EXT4_SB(sb); +	struct ext4_super_block *es = sbi->s_es; +	int i; + +	BUG_ON(flex_gd->count == 0 || group_data == NULL); +	/* +	 * Make the new blocks and inodes valid next.  We do this before +	 * increasing the group count so that once the group is enabled, +	 * all of its blocks and inodes are already valid. +	 * +	 * We always allocate group-by-group, then block-by-block or +	 * inode-by-inode within a group, so enabling these +	 * blocks/inodes before the group is live won't actually let us +	 * allocate the new space yet. +	 */ +	for (i = 0; i < flex_gd->count; i++) { +		blocks_count += group_data[i].blocks_count; +		free_blocks += group_data[i].free_blocks_count; +	} + +	reserved_blocks = ext4_r_blocks_count(es) * 100; +	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); +	reserved_blocks *= blocks_count; +	do_div(reserved_blocks, 100); + +	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); +	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); +	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * +		     flex_gd->count); +	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * +		     flex_gd->count); + +	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); +	/* +	 * We need to protect s_groups_count against other CPUs seeing +	 * inconsistent state in the superblock. +	 * +	 * The precise rules we use are: +	 * +	 * * Writers must perform a smp_wmb() after updating all +	 *   dependent data and before modifying the groups count +	 * +	 * * Readers must perform an smp_rmb() after reading the groups +	 *   count and before reading any dependent data. +	 * +	 * NB. 
These rules can be relaxed when checking the group count +	 * while freeing data, as we can only allocate from a block +	 * group after serialising against the group count, and we can +	 * only then free after serialising in turn against that +	 * allocation. +	 */ +	smp_wmb(); + +	/* Update the global fs size fields */ +	sbi->s_groups_count += flex_gd->count; +	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, +			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); + +	/* Update the reserved block counts only once the new group is +	 * active. */ +	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + +				reserved_blocks); + +	/* Update the free space counts */ +	percpu_counter_add(&sbi->s_freeclusters_counter, +			   EXT4_NUM_B2C(sbi, free_blocks)); +	percpu_counter_add(&sbi->s_freeinodes_counter, +			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count); + +	ext4_debug("free blocks count %llu", +		   percpu_counter_read(&sbi->s_freeclusters_counter)); +	if (EXT4_HAS_INCOMPAT_FEATURE(sb, +				      EXT4_FEATURE_INCOMPAT_FLEX_BG) && +	    sbi->s_log_groups_per_flex) { +		ext4_group_t flex_group; +		flex_group = ext4_flex_group(sbi, group_data[0].group); +		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), +			     &sbi->s_flex_groups[flex_group].free_clusters); +		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, +			   &sbi->s_flex_groups[flex_group].free_inodes); +	} + +	/* +	 * Update the fs overhead information +	 */ +	ext4_calculate_overhead(sb); + +	if (test_opt(sb, DEBUG)) +		printk(KERN_DEBUG "EXT4-fs: added group %u:" +		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, +		       blocks_count, free_blocks, reserved_blocks); +} + +/* Add a flex group to an fs. Ensure we handle all possible error conditions + * _before_ we start modifying the filesystem, because we cannot abort the + * transaction and not have it write the data to disk. + */ +static int ext4_flex_group_add(struct super_block *sb, +			       struct inode *resize_inode, +			       struct ext4_new_flex_group_data *flex_gd) +{ +	struct ext4_sb_info *sbi = EXT4_SB(sb); +	struct ext4_super_block *es = sbi->s_es; +	ext4_fsblk_t o_blocks_count; +	ext4_grpblk_t last; +	ext4_group_t group; +	handle_t *handle; +	unsigned reserved_gdb; +	int err = 0, err2 = 0, credit; + +	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); + +	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); +	o_blocks_count = ext4_blocks_count(es); +	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); +	BUG_ON(last); + +	err = setup_new_flex_group_blocks(sb, flex_gd); +	if (err) +		goto exit; +	/* +	 * We will always be modifying at least the superblock and  GDT +	 * block.  If we are adding a group past the last current GDT block, +	 * we will also modify the inode and the dindirect block.  If we +	 * are adding a group with superblock/GDT backups  we will also +	 * modify each of the reserved GDT dindirect blocks. 
+	 */ +	credit = flex_gd->count * 4 + reserved_gdb; +	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); +	if (IS_ERR(handle)) { +		err = PTR_ERR(handle); +		goto exit; +	} + +	BUFFER_TRACE(sbi->s_sbh, "get_write_access"); +	err = ext4_journal_get_write_access(handle, sbi->s_sbh); +	if (err) +		goto exit_journal; + +	group = flex_gd->groups[0].group; +	BUG_ON(group != EXT4_SB(sb)->s_groups_count); +	err = ext4_add_new_descs(handle, sb, group, +				resize_inode, flex_gd->count); +	if (err) +		goto exit_journal; + +	err = ext4_setup_new_descs(handle, sb, flex_gd); +	if (err) +		goto exit_journal; + +	ext4_update_super(sb, flex_gd); + +	err = ext4_handle_dirty_super(handle, sb); + +exit_journal: +	err2 = ext4_journal_stop(handle); +	if (!err) +		err = err2; + +	if (!err) { +		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); +		int gdb_num_end = ((group + flex_gd->count - 1) / +				   EXT4_DESC_PER_BLOCK(sb)); +		int meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, +				EXT4_FEATURE_INCOMPAT_META_BG); +		sector_t old_gdb = 0; + +		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, +			       sizeof(struct ext4_super_block), 0); +		for (; gdb_num <= gdb_num_end; gdb_num++) { +			struct buffer_head *gdb_bh; + +			gdb_bh = sbi->s_group_desc[gdb_num]; +			if (old_gdb == gdb_bh->b_blocknr) +				continue; +			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, +				       gdb_bh->b_size, meta_bg); +			old_gdb = gdb_bh->b_blocknr; +		} +	} +exit: +	return err; +} + +static int ext4_setup_next_flex_gd(struct super_block *sb, +				    struct ext4_new_flex_group_data *flex_gd, +				    ext4_fsblk_t n_blocks_count, +				    unsigned long flexbg_size) +{ +	struct ext4_super_block *es = EXT4_SB(sb)->s_es; +	struct ext4_new_group_data *group_data = flex_gd->groups; +	ext4_fsblk_t o_blocks_count; +	ext4_group_t n_group; +	ext4_group_t group; +	ext4_group_t last_group; +	ext4_grpblk_t last; +	ext4_grpblk_t blocks_per_group; +	unsigned long i; + +	blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb); + +	o_blocks_count = ext4_blocks_count(es); + +	if (o_blocks_count == n_blocks_count) +		return 0; + +	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); +	BUG_ON(last); +	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); + +	last_group = group | (flexbg_size - 1); +	if (last_group > n_group) +		last_group = n_group; + +	flex_gd->count = last_group - group + 1; + +	for (i = 0; i < flex_gd->count; i++) { +		int overhead; + +		group_data[i].group = group + i; +		group_data[i].blocks_count = blocks_per_group; +		overhead = ext4_group_overhead_blocks(sb, group + i); +		group_data[i].free_blocks_count = blocks_per_group - overhead; +		if (ext4_has_group_desc_csum(sb)) { +			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | +					       EXT4_BG_INODE_UNINIT; +			if (!test_opt(sb, INIT_INODE_TABLE)) +				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED; +		} else +			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; +	} + +	if (last_group == n_group && ext4_has_group_desc_csum(sb)) +		/* We need to initialize block bitmap of last group. */ +		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; + +	if ((last_group == n_group) && (last != blocks_per_group - 1)) { +		group_data[i - 1].blocks_count = last + 1; +		group_data[i - 1].free_blocks_count -= blocks_per_group- +					last - 1; +	} + +	return 1; +} +  /* Add group descriptor data to an existing or new group descriptor block.   
* Ensure we handle all possible error conditions _before_ we start modifying   * the filesystem, because we cannot abort the transaction and not have it @@ -701,18 +1569,16 @@ exit_err:   */  int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)  { +	struct ext4_new_flex_group_data flex_gd;  	struct ext4_sb_info *sbi = EXT4_SB(sb);  	struct ext4_super_block *es = sbi->s_es;  	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?  		le16_to_cpu(es->s_reserved_gdt_blocks) : 0; -	struct buffer_head *primary = NULL; -	struct ext4_group_desc *gdp;  	struct inode *inode = NULL; -	handle_t *handle; -	int gdb_off, gdb_num; -	int err, err2; +	int gdb_off; +	int err; +	__u16 bg_flags = 0; -	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);  	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);  	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, @@ -749,174 +1615,79 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)  	} -	if ((err = verify_group_input(sb, input))) -		goto exit_put; +	err = verify_group_input(sb, input); +	if (err) +		goto out; + +	err = ext4_alloc_flex_bg_array(sb, input->group + 1); +	if (err) +		goto out; -	if ((err = setup_new_group_blocks(sb, input))) -		goto exit_put; +	err = ext4_mb_alloc_groupinfo(sb, input->group + 1); +	if (err) +		goto out; -	/* -	 * We will always be modifying at least the superblock and a GDT -	 * block.  If we are adding a group past the last current GDT block, -	 * we will also modify the inode and the dindirect block.  If we -	 * are adding a group with superblock/GDT backups  we will also -	 * modify each of the reserved GDT dindirect blocks. +	flex_gd.count = 1; +	flex_gd.groups = input; +	flex_gd.bg_flags = &bg_flags; +	err = ext4_flex_group_add(sb, inode, &flex_gd); +out: +	iput(inode); +	return err; +} /* ext4_group_add */ + +/* + * extend a group without checking assuming that checking has been done. + */ +static int ext4_group_extend_no_check(struct super_block *sb, +				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) +{ +	struct ext4_super_block *es = EXT4_SB(sb)->s_es; +	handle_t *handle; +	int err = 0, err2; + +	/* We will update the superblock, one block bitmap, and +	 * one group descriptor via ext4_group_add_blocks().  	 */ -	handle = ext4_journal_start_sb(sb, -				       ext4_bg_has_super(sb, input->group) ? -				       3 + reserved_gdb : 4); +	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);  	if (IS_ERR(handle)) {  		err = PTR_ERR(handle); -		goto exit_put; +		ext4_warning(sb, "error %d on journal start", err); +		return err;  	} -	mutex_lock(&sbi->s_resize_lock); -	if (input->group != sbi->s_groups_count) { -		ext4_warning(sb, "multiple resizers run on filesystem!"); -		err = -EBUSY; -		goto exit_journal; +	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); +	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); +	if (err) { +		ext4_warning(sb, "error %d on journal write access", err); +		goto errout;  	} -	if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh))) -		goto exit_journal; - -        /* -         * We will only either add reserved group blocks to a backup group -         * or remove reserved blocks for the first group in a new group block. -         * Doing both would be mean more complex code, and sane people don't -         * use non-sparse filesystems anymore.  This is already checked above. 
-         */ -	if (gdb_off) { -		primary = sbi->s_group_desc[gdb_num]; -		if ((err = ext4_journal_get_write_access(handle, primary))) -			goto exit_journal; - -		if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) && -		    (err = reserve_backup_gdb(handle, inode, input))) -			goto exit_journal; -	} else if ((err = add_new_gdb(handle, inode, input, &primary))) -		goto exit_journal; - -        /* -         * OK, now we've set up the new group.  Time to make it active. -         * -         * We do not lock all allocations via s_resize_lock -         * so we have to be safe wrt. concurrent accesses the group -         * data.  So we need to be careful to set all of the relevant -         * group descriptor data etc. *before* we enable the group. -         * -         * The key field here is sbi->s_groups_count: as long as -         * that retains its old value, nobody is going to access the new -         * group. -         * -         * So first we update all the descriptor metadata for the new -         * group; then we update the total disk blocks count; then we -         * update the groups count to enable the group; then finally we -         * update the free space counts so that the system can start -         * using the new disk blocks. -         */ - -	/* Update group descriptor block for new group */ -	gdp = (struct ext4_group_desc *)((char *)primary->b_data + -					 gdb_off * EXT4_DESC_SIZE(sb)); - -	memset(gdp, 0, EXT4_DESC_SIZE(sb)); -	ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */ -	ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */ -	ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */ -	ext4_free_blks_set(sb, gdp, input->free_blocks_count); -	ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); -	gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED); -	gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp); - -	/* -	 * We can allocate memory for mb_alloc based on the new group -	 * descriptor -	 */ -	err = ext4_mb_add_groupinfo(sb, input->group, gdp); +	ext4_blocks_count_set(es, o_blocks_count + add); +	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); +	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, +		   o_blocks_count + add); +	/* We add the blocks to the bitmap and set the group need init bit */ +	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);  	if (err) -		goto exit_journal; - -	/* -	 * Make the new blocks and inodes valid next.  We do this before -	 * increasing the group count so that once the group is enabled, -	 * all of its blocks and inodes are already valid. -	 * -	 * We always allocate group-by-group, then block-by-block or -	 * inode-by-inode within a group, so enabling these -	 * blocks/inodes before the group is live won't actually let us -	 * allocate the new space yet. -	 */ -	ext4_blocks_count_set(es, ext4_blocks_count(es) + -		input->blocks_count); -	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb)); - -	/* -	 * We need to protect s_groups_count against other CPUs seeing -	 * inconsistent state in the superblock. -	 * -	 * The precise rules we use are: -	 * -	 * * Writers of s_groups_count *must* hold s_resize_lock -	 * AND -	 * * Writers must perform a smp_wmb() after updating all dependent -	 *   data and before modifying the groups count -	 * -	 * * Readers must hold s_resize_lock over the access -	 * OR -	 * * Readers must perform an smp_rmb() after reading the groups count -	 *   and before reading any dependent data. -	 * -	 * NB. 
These rules can be relaxed when checking the group count
-	 * while freeing data, as we can only allocate from a block
-	 * group after serialising against the group count, and we can
-	 * only then free after serialising in turn against that
-	 * allocation.
-	 */
-	smp_wmb();
-
-	/* Update the global fs size fields */
-	sbi->s_groups_count++;
-
-	ext4_handle_dirty_metadata(handle, NULL, primary);
-
-	/* Update the reserved block counts only once the new group is
-	 * active. */
-	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
-		input->reserved_blocks);
-
-	/* Update the free space counts */
-	percpu_counter_add(&sbi->s_freeblocks_counter,
-			   input->free_blocks_count);
-	percpu_counter_add(&sbi->s_freeinodes_counter,
-			   EXT4_INODES_PER_GROUP(sb));
-
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
-	    sbi->s_log_groups_per_flex) {
-		ext4_group_t flex_group;
-		flex_group = ext4_flex_group(sbi, input->group);
-		atomic_add(input->free_blocks_count,
-			   &sbi->s_flex_groups[flex_group].free_blocks);
-		atomic_add(EXT4_INODES_PER_GROUP(sb),
-			   &sbi->s_flex_groups[flex_group].free_inodes);
-	}
-
+		goto errout;
 	ext4_handle_dirty_super(handle, sb);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext4_journal_stop(handle)) && !err)
+	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
+		   o_blocks_count + add);
+errout:
+	err2 = ext4_journal_stop(handle);
+	if (err2 && !err)
 		err = err2;
+
 	if (!err) {
-		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
-			       sizeof(struct ext4_super_block));
-		update_backups(sb, primary->b_blocknr, primary->b_data,
-			       primary->b_size);
+		if (test_opt(sb, DEBUG))
+			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
+			       "blocks\n", ext4_blocks_count(es));
+		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
+			       (char *)es, sizeof(struct ext4_super_block), 0);
 	}
-exit_put:
-	iput(inode);
 	return err;
-} /* ext4_group_add */
+}
 
 /*
  * Extend the filesystem to the new number of blocks specified.  This entry
@@ -935,26 +1706,23 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 	ext4_grpblk_t last;
 	ext4_grpblk_t add;
 	struct buffer_head *bh;
-	handle_t *handle;
 	int err;
 	ext4_group_t group;
 
-	/* We don't need to worry about locking wrt other resizers just
-	 * yet: we're going to revalidate es->s_blocks_count after
-	 * taking the s_resize_lock below.
-	 */
 	o_blocks_count = ext4_blocks_count(es);
 
 	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
-		       o_blocks_count, n_blocks_count);
+		ext4_msg(sb, KERN_DEBUG,
+			 "extending last group from %llu to %llu blocks",
+			 o_blocks_count, n_blocks_count);
 
 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
 		return 0;
 
 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-		printk(KERN_ERR "EXT4-fs: filesystem on %s:"
-			" too large to resize to %llu blocks safely\n",
-			sb->s_id, n_blocks_count);
+		ext4_msg(sb, KERN_ERR,
+			 "filesystem too large to resize to %llu blocks safely",
+			 n_blocks_count);
 		if (sizeof(sector_t) < 8)
 			ext4_warning(sb, "CONFIG_LBDAF not enabled");
 		return -EINVAL;
@@ -962,7 +1730,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 
 	if (n_blocks_count < o_blocks_count) {
 		ext4_warning(sb, "can't shrink FS - resize aborted");
-		return -EBUSY;
+		return -EINVAL;
 	}
 
 	/* Handle the remaining blocks in the last group only. */
@@ -995,49 +1763,258 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 	}
 	brelse(bh);
 
-	/* We will update the superblock, one block bitmap, and
-	 * one group descriptor via ext4_free_blocks().
-	 */
-	handle = ext4_journal_start_sb(sb, 3);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		ext4_warning(sb, "error %d on journal start", err);
-		goto exit_put;
+	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+	return err;
+} /* ext4_group_extend */
+
+
+static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
+{
+	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
+}
+
+/*
+ * Release the resize inode and drop the resize_inode feature if there
+ * are no more reserved gdt blocks, and then convert the file system
+ * to enable meta_bg
+ */
+static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+{
+	handle_t *handle;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_super_block *es = sbi->s_es;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	ext4_fsblk_t nr;
+	int i, ret, err = 0;
+	int credits = 1;
+
+	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
+	if (inode) {
+		if (es->s_reserved_gdt_blocks) {
+			ext4_error(sb, "Unexpected non-zero "
+				   "s_reserved_gdt_blocks");
+			return -EPERM;
+		}
+
+		/* Do a quick sanity check of the resize inode */
+		if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
+			goto invalid_resize_inode;
+		for (i = 0; i < EXT4_N_BLOCKS; i++) {
+			if (i == EXT4_DIND_BLOCK) {
+				if (ei->i_data[i])
+					continue;
+				else
+					goto invalid_resize_inode;
+			}
+			if (ei->i_data[i])
+				goto invalid_resize_inode;
+		}
+		credits += 3;	/* block bitmap, bg descriptor, resize inode */
 	}
-
-	mutex_lock(&EXT4_SB(sb)->s_resize_lock);
-	if (o_blocks_count != ext4_blocks_count(es)) {
-		ext4_warning(sb, "multiple resizers run on filesystem!");
-		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
-		ext4_journal_stop(handle);
-		err = -EBUSY;
-		goto exit_put;
+
+	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
+	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+	if (err)
+		goto errout;
+
+	EXT4_CLEAR_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE);
+	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
+	sbi->s_es->s_first_meta_bg =
+		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+
+	err = ext4_handle_dirty_super(handle, sb);
+	if (err) {
+		ext4_std_error(sb, err);
+		goto errout;
 	}
 
-	if ((err = ext4_journal_get_write_access(handle,
-						 EXT4_SB(sb)->s_sbh))) {
-		ext4_warning(sb, "error %d on journal write access", err);
-		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
-		ext4_journal_stop(handle);
-		goto exit_put;
+	if (inode) {
+		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
+		ext4_free_blocks(handle, inode, NULL, nr, 1,
+				 EXT4_FREE_BLOCKS_METADATA |
+				 EXT4_FREE_BLOCKS_FORGET);
+		ei->i_data[EXT4_DIND_BLOCK] = 0;
+		inode->i_blocks = 0;
+
+		err = ext4_mark_inode_dirty(handle, inode);
+		if (err)
+			ext4_std_error(sb, err);
 	}
-	ext4_blocks_count_set(es, o_blocks_count + add);
-	mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
-	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
-		   o_blocks_count + add);
-	/* We add the blocks to the bitmap and set the group need init bit */
-	ext4_add_groupblocks(handle, sb, o_blocks_count, add);
-	ext4_handle_dirty_super(handle, sb);
-	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
-		   o_blocks_count + add);
-	if ((err = ext4_journal_stop(handle)))
-		goto exit_put;
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
-		       ext4_blocks_count(es));
-	update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
-		       sizeof(struct ext4_super_block));
-exit_put:
+
+errout:
+	ret = ext4_journal_stop(handle);
+	if (!err)
+		err = ret;
+	return ret;
+
+invalid_resize_inode:
+	ext4_error(sb, "corrupted/inconsistent resize inode");
+	return -EINVAL;
+}
+
+/*
+ * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
+ *
+ * @sb: super block of the fs to be resized
+ * @n_blocks_count: the number of blocks resides in the resized fs
+ */
+int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+{
+	struct ext4_new_flex_group_data *flex_gd = NULL;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_super_block *es = sbi->s_es;
+	struct buffer_head *bh;
+	struct inode *resize_inode = NULL;
+	ext4_grpblk_t add, offset;
+	unsigned long n_desc_blocks;
+	unsigned long o_desc_blocks;
+	ext4_group_t o_group;
+	ext4_group_t n_group;
+	ext4_fsblk_t o_blocks_count;
+	ext4_fsblk_t n_blocks_count_retry = 0;
+	unsigned long last_update_time = 0;
+	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
+	int meta_bg;
+
+	/* See if the device is actually as big as what was requested */
+	bh = sb_bread(sb, n_blocks_count - 1);
+	if (!bh) {
+		ext4_warning(sb, "can't read last block, resize aborted");
+		return -ENOSPC;
+	}
+	brelse(bh);
+
+retry:
+	o_blocks_count = ext4_blocks_count(es);
+
+	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
+		 "to %llu blocks", o_blocks_count, n_blocks_count);
+
+	if (n_blocks_count < o_blocks_count) {
+		/* On-line shrinking not supported */
+		ext4_warning(sb, "can't shrink FS - resize aborted");
+		return -EINVAL;
+	}
+
+	if (n_blocks_count == o_blocks_count)
+		/* Nothing need to do */
+		return 0;
+
+	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
+	if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+		ext4_warning(sb, "resize would cause inodes_count overflow");
+		return -EINVAL;
+	}
+	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
+
+	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
+	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
+
+	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
+
+	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE)) {
+		if (meta_bg) {
+			ext4_error(sb, "resize_inode and meta_bg enabled "
+				   "simultaneously");
+			return -EINVAL;
+		}
+		if (n_desc_blocks > o_desc_blocks +
+		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
+			n_blocks_count_retry = n_blocks_count;
+			n_desc_blocks = o_desc_blocks +
+				le16_to_cpu(es->s_reserved_gdt_blocks);
+			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+			n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+			n_group--; /* set to last group number */
+		}
+
+		if (!resize_inode)
+			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+		if (IS_ERR(resize_inode)) {
+			ext4_warning(sb, "Error opening resize inode");
+			return PTR_ERR(resize_inode);
+		}
+	}
+
+	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
+		err = ext4_convert_meta_bg(sb, resize_inode);
+		if (err)
+			goto out;
+		if (resize_inode) {
+			iput(resize_inode);
+			resize_inode = NULL;
+		}
+		if (n_blocks_count_retry) {
+			n_blocks_count = n_blocks_count_retry;
+			n_blocks_count_retry = 0;
+			goto retry;
+		}
+	}
+
+	/* extend the last group */
+	if (n_group == o_group)
+		add = n_blocks_count - o_blocks_count;
+	else
+		add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
+	if (add > 0) {
+		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+		if (err)
+			goto out;
+	}
+
+	if (ext4_blocks_count(es) == n_blocks_count)
+		goto out;
+
+	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
+	if (err)
+		return err;
+
+	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
+	if (err)
+		goto out;
+
+	flex_gd = alloc_flex_gd(flexbg_size);
+	if (flex_gd == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* Add flex groups. Note that a regular group is a
+	 * flex group with 1 group.
+	 */
+	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
+					      flexbg_size)) {
+		if (jiffies - last_update_time > HZ * 10) {
+			if (last_update_time)
+				ext4_msg(sb, KERN_INFO,
+					 "resized to %llu blocks",
+					 ext4_blocks_count(es));
+			last_update_time = jiffies;
+		}
+		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
+			break;
+		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
+		if (unlikely(err))
+			break;
	}
+
+	if (!err && n_blocks_count_retry) {
+		n_blocks_count = n_blocks_count_retry;
+		n_blocks_count_retry = 0;
+		free_flex_gd(flex_gd);
+		flex_gd = NULL;
+		goto retry;
+	}
+
+out:
+	if (flex_gd)
+		free_flex_gd(flex_gd);
+	if (resize_inode != NULL)
+		iput(resize_inode);
+	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
 	return err;
-} /* ext4_group_extend */
+}
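
For readers following the new code: ext4_resize_fs() is the kernel half of an online-resize request; newer resize2fs reaches it through the EXT4_IOC_RESIZE_FS ioctl, passing the desired filesystem size in blocks as a __u64. The snippet below is a minimal userspace sketch of that call path and is not part of the patch itself: the ioctl number is copied from the kernel's ext4 headers (define it locally if your libc headers lack it, as resize2fs does), and the mount point and block count arguments are placeholders.

/* resize_sketch.c - illustrative only; run as root against a mounted ext4 fs */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#ifndef EXT4_IOC_RESIZE_FS
#define EXT4_IOC_RESIZE_FS	_IOW('f', 16, __u64)	/* as in fs/ext4/ext4.h */
#endif

int main(int argc, char **argv)
{
	__u64 n_blocks_count;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mount-point> <new-block-count>\n",
			argv[0]);
		return 1;
	}

	/* Any descriptor on the filesystem works; the mount point is simplest. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n_blocks_count = strtoull(argv[2], NULL, 0);

	/* The kernel side of this call is ext4_resize_fs() above. */
	if (ioctl(fd, EXT4_IOC_RESIZE_FS, &n_blocks_count) < 0) {
		perror("EXT4_IOC_RESIZE_FS");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Note that ext4_resize_fs() first reads the would-be last block (the sb_bread() check at the top of the function), so issuing the ioctl before the underlying block device has actually been grown fails early with ENOSPC instead of attempting a partial resize.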
