Diffstat (limited to 'fs/f2fs/segment.h')
-rw-r--r--	fs/f2fs/segment.h	190
1 file changed, 130 insertions(+), 60 deletions(-)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bdd10eab8c4..7091204680f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -14,17 +14,14 @@
 #define NULL_SEGNO			((unsigned int)(~0))
 #define NULL_SECNO			((unsigned int)(~0))
 
+#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
+
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)
 
-#define IS_DATASEG(t)							\
-	((t == CURSEG_HOT_DATA) || (t == CURSEG_COLD_DATA) ||		\
-	(t == CURSEG_WARM_DATA))
-
-#define IS_NODESEG(t)							\
-	((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) ||		\
-	(t == CURSEG_WARM_NODE))
+#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
+#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)
 
 #define IS_CURSEG(sbi, seg)						\
 	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
@@ -60,6 +57,9 @@
 	((blk_addr) - SM_I(sbi)->seg0_blkaddr)
 #define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
+#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
+	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
+
 #define GET_SEGNO(sbi, blk_addr)					\
 	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
@@ -81,22 +81,19 @@
 	(segno / SIT_ENTRY_PER_BLOCK)
 #define	START_SEGNO(sit_i, segno)		\
 	(SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define SIT_BLK_CNT(sbi)			\
+	((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
 #define f2fs_bitmap_size(nr)			\
 	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
 #define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)
 #define TOTAL_SECS(sbi)	(sbi->total_sections)
 
 #define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
-	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+	(((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
 #define SECTOR_TO_BLOCK(sbi, sectors)					\
-	(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
-
-/* during checkpoint, bio_private is used to synchronize the last bio */
-struct bio_private {
-	struct f2fs_sb_info *sbi;
-	bool is_sync;
-	void *wait;
-};
+	(sectors >> (sbi)->log_sectors_per_block)
+#define MAX_BIO_BLOCKS(max_hw_blocks)					\
+	(min((int)max_hw_blocks, BIO_MAX_PAGES))
 
 /*
  * indicate a block allocation direction: RIGHT and LEFT.
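The SECTOR_FROM_BLOCK() rewrite above is not just a rename: block_t is a 32-bit type, so the old macro performed the shift in 32-bit arithmetic and could wrap on large volumes before the result was widened to sector_t. A minimal userspace sketch of the failure mode, assuming the typedef widths below (the test address is illustrative, not taken from the kernel headers):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;	/* f2fs block address: 32-bit on disk */
typedef uint64_t sector_t;	/* sector number: 64-bit here */

/* Old behavior: the shift happens in 32-bit arithmetic, so the high
 * bits are lost before the result is widened to sector_t. */
static sector_t sector_from_block_old(block_t blk, unsigned log_sectors_per_block)
{
	return blk << log_sectors_per_block;
}

/* New behavior: widen to sector_t first, then shift in 64 bits. */
static sector_t sector_from_block_new(block_t blk, unsigned log_sectors_per_block)
{
	return ((sector_t)blk) << log_sectors_per_block;
}

int main(void)
{
	block_t blk = 0x40000000;	/* a block address past 2^30 */
	unsigned log = 3;		/* 4KB block / 512B sector = 8 */

	printf("old: %llu\n", (unsigned long long)sector_from_block_old(blk, log));
	printf("new: %llu\n", (unsigned long long)sector_from_block_new(blk, log));
	return 0;
}

The old variant prints 0 because the 32-bit shift wraps; casting before the shift, as the new macro does, preserves the full sector number.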
@@ -383,26 +380,12 @@ static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
 
 static inline block_t written_block_count(struct f2fs_sb_info *sbi)
 {
-	struct sit_info *sit_i = SIT_I(sbi);
-	block_t vblocks;
-
-	mutex_lock(&sit_i->sentry_lock);
-	vblocks = sit_i->written_valid_blocks;
-	mutex_unlock(&sit_i->sentry_lock);
-
-	return vblocks;
+	return SIT_I(sbi)->written_valid_blocks;
 }
 
 static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
 {
-	struct free_segmap_info *free_i = FREE_I(sbi);
-	unsigned int free_segs;
-
-	read_lock(&free_i->segmap_lock);
-	free_segs = free_i->free_segments;
-	read_unlock(&free_i->segmap_lock);
-
-	return free_segs;
+	return FREE_I(sbi)->free_segments;
 }
 
 static inline int reserved_segments(struct f2fs_sb_info *sbi)
@@ -412,14 +395,7 @@ static inline int reserved_segments(struct f2fs_sb_info *sbi)
 
 static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
 {
-	struct free_segmap_info *free_i = FREE_I(sbi);
-	unsigned int free_secs;
-
-	read_lock(&free_i->segmap_lock);
-	free_secs = free_i->free_sections;
-	read_unlock(&free_i->segmap_lock);
-
-	return free_secs;
+	return FREE_I(sbi)->free_sections;
 }
 
 static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
@@ -454,8 +430,8 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
 
 static inline bool need_SSR(struct f2fs_sb_info *sbi)
 {
-	return ((prefree_segments(sbi) / sbi->segs_per_sec)
-			+ free_sections(sbi) < overprovision_sections(sbi));
+	return (prefree_segments(sbi) / sbi->segs_per_sec)
+			+ free_sections(sbi) < overprovision_sections(sbi);
 }
 
 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
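need_SSR() above keeps the same heuristic with one redundant pair of parentheses removed: prefree segments are converted to whole sections, added to the free sections, and SSR allocation is requested once that sum drops below the overprovisioned reserve. A standalone sketch of the arithmetic, where struct seg_counts and the sample numbers are hypothetical stand-ins for the sbi counters:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical snapshot of the counters need_SSR() consults. */
struct seg_counts {
	unsigned prefree_segments;	/* freed, not yet checkpointed */
	unsigned segs_per_sec;
	unsigned free_sections;
	unsigned overprovision_sections;
};

/* Mirrors the simplified need_SSR(): SSR kicks in once the reusable
 * sections (free plus soon-to-be-free prefree) fall below the
 * overprovisioned reserve. */
static bool need_ssr(const struct seg_counts *c)
{
	return (c->prefree_segments / c->segs_per_sec)
			+ c->free_sections < c->overprovision_sections;
}

int main(void)
{
	struct seg_counts c = { .prefree_segments = 6, .segs_per_sec = 2,
				.free_sections = 4, .overprovision_sections = 8 };

	/* 6/2 + 4 = 7 < 8, so the slack is gone and SSR is needed. */
	printf("need_SSR: %s\n", need_ssr(&c) ? "yes" : "no");
	return 0;
}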
@@ -463,33 +439,71 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 
-	if (sbi->por_doing)
+	if (unlikely(sbi->por_doing))
 		return false;
 
-	return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
-						reserved_sections(sbi)));
+	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
+						reserved_sections(sbi));
+}
+
+static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+{
+	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
 }
 
 static inline int utilization(struct f2fs_sb_info *sbi)
 {
-	return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count);
+	return div_u64((u64)valid_user_blocks(sbi) * 100,
+					sbi->user_block_count);
 }
 
 /*
  * Sometimes it is better for f2fs to drop its out-of-place update policy.
- * So, if fs utilization is over MIN_IPU_UTIL, then f2fs tries to write
- * data in the original place likewise other traditional file systems.
- * But, currently set 100 in percentage, which means it is disabled.
- * See below need_inplace_update().
+ * Users can control the policy through sysfs entries.
+ * There are five policies with the following triggering conditions.
+ * F2FS_IPU_FORCE - all the time,
+ * F2FS_IPU_SSR - if SSR mode is activated,
+ * F2FS_IPU_UTIL - if FS utilization is over threshold,
+ * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
+ *                     threshold,
+ * F2FS_IPU_DISABLE - disable IPU. (=default option)
  */
-#define MIN_IPU_UTIL		100
+#define DEF_MIN_IPU_UTIL	70
+
+enum {
+	F2FS_IPU_FORCE,
+	F2FS_IPU_SSR,
+	F2FS_IPU_UTIL,
+	F2FS_IPU_SSR_UTIL,
+	F2FS_IPU_DISABLE,
+};
+
 static inline bool need_inplace_update(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+
+	/* IPU can be done only for user data */
 	if (S_ISDIR(inode->i_mode))
 		return false;
-	if (need_SSR(sbi) && utilization(sbi) > MIN_IPU_UTIL)
+
+	switch (SM_I(sbi)->ipu_policy) {
+	case F2FS_IPU_FORCE:
 		return true;
+	case F2FS_IPU_SSR:
+		if (need_SSR(sbi))
+			return true;
+		break;
+	case F2FS_IPU_UTIL:
+		if (utilization(sbi) > SM_I(sbi)->min_ipu_util)
+			return true;
+		break;
+	case F2FS_IPU_SSR_UTIL:
+		if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
+			return true;
+		break;
+	case F2FS_IPU_DISABLE:
+		break;
+	}
 	return false;
 }
@@ -513,16 +527,13 @@ static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
 	return curseg->next_blkoff;
 }
 
+#ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
 	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
 	BUG_ON(segno > end_segno);
 }
 
-/*
- * This function is used for only debugging.
- * NOTE: In future, we have to remove this function.
- */
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
@@ -541,8 +552,9 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
 	unsigned int end_segno = sm_info->segment_count - 1;
+	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
-	int i;
+	int cur_pos = 0, next_pos;
 
 	/* check segment usage */
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
@@ -551,11 +563,26 @@
 	BUG_ON(segno > end_segno);
 
 	/* check bitmap with valid block count */
-	for (i = 0; i < sbi->blocks_per_seg; i++)
-		if (f2fs_test_bit(i, raw_sit->valid_map))
-			valid_blocks++;
+	do {
+		if (is_valid) {
+			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
+					sbi->blocks_per_seg,
+					cur_pos);
+			valid_blocks += next_pos - cur_pos;
+		} else
+			next_pos = find_next_bit_le(&raw_sit->valid_map,
+					sbi->blocks_per_seg,
+					cur_pos);
+		cur_pos = next_pos;
+		is_valid = !is_valid;
+	} while (cur_pos < sbi->blocks_per_seg);
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
 }
+#else
+#define check_seg_range(sbi, segno)
+#define verify_block_addr(sbi, blk_addr)
+#define check_block_count(sbi, segno, raw_sit)
+#endif
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
 						unsigned int start)
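The rewritten check_block_count() above no longer tests all blocks_per_seg bits one by one; it walks the SIT validity bitmap as alternating runs of set and clear bits and only accumulates the lengths of the set runs. A userspace approximation of that loop, where find_next() is a simplified stand-in for the kernel's find_next_bit_le()/find_next_zero_bit_le() and a byte-granular little-endian bitmap is assumed:

#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_SEG 512

/* Return the first position >= pos whose bit equals want_set,
 * or size if there is none. Stand-in for the kernel helpers. */
static int find_next(const unsigned char *map, int size, int pos, bool want_set)
{
	for (; pos < size; pos++)
		if (!!(map[pos >> 3] & (1 << (pos & 7))) == want_set)
			return pos;
	return size;
}

/* Count valid blocks by alternating runs, as check_block_count() does. */
static int count_valid(const unsigned char *map)
{
	bool is_valid = map[0] & 1;	/* state of the first run */
	int valid_blocks = 0, cur_pos = 0, next_pos;

	do {
		if (is_valid) {
			/* run of 1s: find where it ends, credit its length */
			next_pos = find_next(map, BLOCKS_PER_SEG, cur_pos, false);
			valid_blocks += next_pos - cur_pos;
		} else {
			/* run of 0s: just skip ahead to the next set bit */
			next_pos = find_next(map, BLOCKS_PER_SEG, cur_pos, true);
		}
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < BLOCKS_PER_SEG);

	return valid_blocks;
}

int main(void)
{
	unsigned char map[BLOCKS_PER_SEG / 8] = { 0 };
	int i;

	for (i = 0; i < 100; i++)	/* first run: blocks 0..99 valid */
		map[i >> 3] |= 1 << (i & 7);
	for (i = 200; i < 210; i++)	/* second run: blocks 200..209 */
		map[i >> 3] |= 1 << (i & 7);

	printf("valid blocks: %d\n", count_valid(map));	/* expect 110 */
	return 0;
}

For mostly-full or mostly-empty segments, the common case, this touches a handful of word-sized scans instead of 512 individual bit tests.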
@@ -637,3 +664,46 @@ static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
 	struct request_queue *q = bdev_get_queue(bdev);
 	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
 }
+
+/*
+ * It is very important to gather dirty pages and write at once, so that we can
+ * submit a big bio without interfering with other data writes.
+ * By default, 512 pages for directory data,
+ * 512 pages (2MB) * 3 for three types of nodes, and
+ * max_bio_blocks for meta are set.
+ */
+static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
+{
+	if (type == DATA)
+		return sbi->blocks_per_seg;
+	else if (type == NODE)
+		return 3 * sbi->blocks_per_seg;
+	else if (type == META)
+		return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+	else
+		return 0;
+}
+
+/*
+ * When writing pages, it is better to align nr_to_write to the segment size.
+ */
+static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
+					struct writeback_control *wbc)
+{
+	long nr_to_write, desired;
+
+	if (wbc->sync_mode != WB_SYNC_NONE)
+		return 0;
+
+	nr_to_write = wbc->nr_to_write;
+
+	if (type == DATA)
+		desired = 4096;
+	else if (type == NODE)
+		desired = 3 * max_hw_blocks(sbi);
+	else
+		desired = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+
+	wbc->nr_to_write = desired;
+	return desired - nr_to_write;
+}
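nr_pages_to_write() only adjusts background writeback (sync writeback returns 0 and is left alone): it substitutes a type-specific batch size for wbc->nr_to_write so that bios stay large and segment-aligned, and returns the surplus so the caller can correct the accounting after writeback finishes. A self-contained model of that logic, where struct wbc, for_sync, and the constants are simplified stand-ins for the kernel's writeback_control and mount-time values:

#include <stdio.h>

#define BLOCKS_PER_SEG	512	/* 2MB segment / 4KB block (illustrative) */
#define BIO_MAX_PAGES	256

enum page_type { DATA, NODE, META };

struct wbc { long nr_to_write; int for_sync; };

/* Mirrors nr_pages_to_write(): inflate background writeback to a
 * type-specific batch and report the surplus to the caller. */
static long nr_pages_to_write(struct wbc *wbc, enum page_type type,
				long max_hw_blocks)
{
	long nr_to_write, desired;

	if (wbc->for_sync)	/* sync writeback is never inflated */
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks;
	else	/* META: capped by what a single bio can carry */
		desired = max_hw_blocks < BIO_MAX_PAGES
				? max_hw_blocks : BIO_MAX_PAGES;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

int main(void)
{
	struct wbc wbc = { .nr_to_write = 1024, .for_sync = 0 };
	long excess = nr_pages_to_write(&wbc, DATA, BLOCKS_PER_SEG);

	/* Writeback now targets 4096 pages; the 3072 excess is handed
	 * back so the caller can subtract it once writeback completes. */
	printf("nr_to_write=%ld excess=%ld\n", wbc.nr_to_write, excess);
	return 0;
}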
