Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r--  fs/gfs2/glops.c | 373
1 file changed, 265 insertions(+), 108 deletions(-)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 0d149dcc04e..2ffc67dce87 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -26,53 +26,107 @@
 #include "rgrp.h"
 #include "util.h"
 #include "trans.h"
+#include "dir.h"
+
+static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
+{
+	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
+	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
+	       bh->b_page->mapping, bh->b_page->flags);
+	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
+	       gl->gl_name.ln_type, gl->gl_name.ln_number,
+	       gfs2_glock2aspace(gl));
+	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
+}
 
 /**
- * ail_empty_gl - remove all buffers for a given lock from the AIL
+ * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
  * @gl: the glock
+ * @fsync: set when called from fsync (not all buffers will be clean)
  *
  * None of the buffers should be dirty, locked, or pinned.
  */
 
-static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+			     unsigned int nr_revokes)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct list_head *head = &gl->gl_ail_list;
-	struct gfs2_bufdata *bd;
+	struct gfs2_bufdata *bd, *tmp;
 	struct buffer_head *bh;
+	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
+
+	gfs2_log_lock(sdp);
+	spin_lock(&sdp->sd_ail_lock);
+	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+		if (nr_revokes == 0)
+			break;
+		bh = bd->bd_bh;
+		if (bh->b_state & b_state) {
+			if (fsync)
+				continue;
+			gfs2_ail_error(gl, bh);
+		}
+		gfs2_trans_add_revoke(sdp, bd);
+		nr_revokes--;
+	}
+	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
+	spin_unlock(&sdp->sd_ail_lock);
+	gfs2_log_unlock(sdp);
+}
+
+
+static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_trans tr;
 
 	memset(&tr, 0, sizeof(tr));
+	INIT_LIST_HEAD(&tr.tr_buf);
+	INIT_LIST_HEAD(&tr.tr_databuf);
 	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
 
 	if (!tr.tr_revokes)
 		return;
 
-	/* A shortened, inline version of gfs2_trans_begin() */
+	/* A shortened, inline version of gfs2_trans_begin()
+         * tr->alloced is not set since the transaction structure is
+         * on the stack */
 	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
 	tr.tr_ip = (unsigned long)__builtin_return_address(0);
-	INIT_LIST_HEAD(&tr.tr_list_buf);
-	gfs2_log_reserve(sdp, tr.tr_reserved);
-	BUG_ON(current->journal_info);
+	sb_start_intwrite(sdp->sd_vfs);
+	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) {
+		sb_end_intwrite(sdp->sd_vfs);
+		return;
+	}
+	WARN_ON_ONCE(current->journal_info);
 	current->journal_info = &tr;
 
-	gfs2_log_lock(sdp);
-	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata,
-				bd_ail_gl_list);
-		bh = bd->bd_bh;
-		gfs2_remove_from_ail(bd);
-		bd->bd_bh = NULL;
-		bh->b_private = NULL;
-		bd->bd_blkno = bh->b_blocknr;
-		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
-		gfs2_trans_add_revoke(sdp, bd);
-	}
-	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
-	gfs2_log_unlock(sdp);
+	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
+
+	gfs2_trans_end(sdp);
+	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+}
+
+void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	unsigned int revokes = atomic_read(&gl->gl_ail_count);
+	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
+	int ret;
+
+	if (!revokes)
+		return;
+
+	while (revokes > max_revokes)
+		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+	ret = gfs2_trans_begin(sdp, 0, max_revokes);
+	if (ret)
+		return;
+	__gfs2_ail_flush(gl, fsync, max_revokes);
 	gfs2_trans_end(sdp);
-	gfs2_log_flush(sdp, NULL);
+	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
 }
 
 /**
@@ -86,18 +140,26 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
 {
-	struct address_space *metamapping = gfs2_glock2aspace(gl);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct address_space *mapping = &sdp->sd_aspace;
+	struct gfs2_rgrpd *rgd;
 	int error;
 
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
 		return;
-	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl);
-	filemap_fdatawrite(metamapping);
-	error = filemap_fdatawait(metamapping);
-        mapping_set_error(metamapping, error);
+	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
+	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	mapping_set_error(mapping, error);
 	gfs2_ail_empty_gl(gl);
+
+	spin_lock(&gl->gl_spin);
+	rgd = gl->gl_object;
+	if (rgd)
+		gfs2_free_clones(rgd);
+	spin_unlock(&gl->gl_spin);
 }
 
 /**
@@ -112,11 +174,12 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct address_space *mapping = gfs2_glock2aspace(gl);
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct address_space *mapping = &sdp->sd_aspace;
 
-	BUG_ON(!(flags & DIO_METADATA));
-	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
-	truncate_inode_pages(mapping, 0);
+	WARN_ON_ONCE(!(flags & DIO_METADATA));
+	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
+	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
 
 	if (gl->gl_object) {
 		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
@@ -138,14 +201,17 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	if (ip && !S_ISREG(ip->i_inode.i_mode))
 		ip = NULL;
 
-	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
-		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+	if (ip) {
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+		inode_dio_wait(&ip->i_inode);
+	}
 
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
 		return;
 
-	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl);
+	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
 	filemap_fdatawrite(metamapping);
 	if (ip) {
 		struct address_space *mapping = ip->i_inode.i_mapping;
@@ -160,7 +226,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	 * Writeback of the data mapping may cause the dirty flag to be set
 	 * so we have to clear it again here.
 	 */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(GLF_DIRTY, &gl->gl_flags);
 }
 
@@ -168,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
- * 
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
@@ -187,11 +253,14 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 		if (ip) {
 			set_bit(GIF_INVALID, &ip->i_flags);
 			forget_all_cached_acls(&ip->i_inode);
+			gfs2_dir_hash_inval(ip);
 		}
 	}
 
-	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
 		gl->gl_sbd->sd_rindex_uptodate = 0;
+	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
@@ -206,12 +275,130 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 static int inode_go_demote_ok(const struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_holder *gh;
+
 	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
 		return 0;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+		if (gh->gh_list.next != &gl->gl_holders)
+			return 0;
+	}
+
 	return 1;
 }
 
 /**
+ * gfs2_set_nlink - Set the inode's link count based on on-disk info
+ * @inode: The inode in question
+ * @nlink: The link count
+ *
+ * If the link count has hit zero, it must never be raised, whatever the
+ * on-disk inode might say. When new struct inodes are created the link
+ * count is set to 1, so that we can safely use this test even when reading
+ * in on disk information for the first time.
+ */
+
+static void gfs2_set_nlink(struct inode *inode, u32 nlink)
+{
+	/*
+	 * We will need to review setting the nlink count here in the
+	 * light of the forthcoming ro bind mount work. This is a reminder
+	 * to do that.
+	 */
+	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
+		if (nlink == 0)
+			clear_nlink(inode);
+		else
+			set_nlink(inode, nlink);
+	}
+}
+
+static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+{
+	const struct gfs2_dinode *str = buf;
+	struct timespec atime;
+	u16 height, depth;
+
+	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
+		goto corrupt;
+	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
+	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
+	ip->i_inode.i_rdev = 0;
+	switch (ip->i_inode.i_mode & S_IFMT) {
+	case S_IFBLK:
+	case S_IFCHR:
+		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
+					   be32_to_cpu(str->di_minor));
+		break;
+	};
+
+	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
+	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
+	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
+	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
+	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
+	atime.tv_sec = be64_to_cpu(str->di_atime);
+	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
+	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
+		ip->i_inode.i_atime = atime;
+	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
+	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
+	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
+	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
+
+	ip->i_goal = be64_to_cpu(str->di_goal_meta);
+	ip->i_generation = be64_to_cpu(str->di_generation);
+
+	ip->i_diskflags = be32_to_cpu(str->di_flags);
+	ip->i_eattr = be64_to_cpu(str->di_eattr);
+	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
+	gfs2_set_inode_flags(&ip->i_inode);
+	height = be16_to_cpu(str->di_height);
+	if (unlikely(height > GFS2_MAX_META_HEIGHT))
+		goto corrupt;
+	ip->i_height = (u8)height;
+
+	depth = be16_to_cpu(str->di_depth);
+	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
+		goto corrupt;
+	ip->i_depth = (u8)depth;
+	ip->i_entries = be32_to_cpu(str->di_entries);
+
+	if (S_ISREG(ip->i_inode.i_mode))
+		gfs2_set_aops(&ip->i_inode);
+
+	return 0;
+corrupt:
+	gfs2_consist_inode(ip);
+	return -EIO;
+}
+
+/**
+ * gfs2_inode_refresh - Refresh the incore copy of the dinode
+ * @ip: The GFS2 inode
+ *
+ * Returns: errno
+ */
+
+int gfs2_inode_refresh(struct gfs2_inode *ip)
+{
+	struct buffer_head *dibh;
+	int error;
+
+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (error)
+		return error;
+
+	error = gfs2_dinode_in(ip, dibh->b_data);
+	brelse(dibh);
+	clear_bit(GIF_INVALID, &ip->i_flags);
+
+	return error;
+}
+
+/**
  * inode_go_lock - operation done after an inode lock is locked by a process
  * @gl: the glock
  * @flags:
@@ -235,6 +422,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
 			return error;
 	}
 
+	if (gh->gh_state != LM_ST_DEFERRED)
+		inode_dio_wait(&ip->i_inode);
+
 	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
 	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
@@ -254,90 +444,55 @@ static int inode_go_lock(struct gfs2_holder *gh)
  * @seq: The iterator
  * @ip: the inode
  *
- * Returns: 0 on success, -ENOBUFS when we run out of space
  */
 
-static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
 {
 	const struct gfs2_inode *ip = gl->gl_object;
 	if (ip == NULL)
-		return 0;
+		return;
 	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
 		  (unsigned long long)ip->i_no_formal_ino,
 		  (unsigned long long)ip->i_no_addr,
 		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
 		  (unsigned int)ip->i_diskflags,
 		  (unsigned long long)i_size_read(&ip->i_inode));
-	return 0;
 }
 
 /**
- * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
-{
-	const struct address_space *mapping = (const struct address_space *)(gl + 1);
-	return !mapping->nrpages;
-}
-
-/**
- * rgrp_go_lock - operation done after an rgrp lock is locked by
- *    a first holder on this node.
- * @gl: the glock
- * @flags:
- *
- * Returns: errno
- */
-
-static int rgrp_go_lock(struct gfs2_holder *gh)
-{
-	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
-}
-
-/**
- * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
- *    a last holder on this node.
- * @gl: the glock
- * @flags:
- *
- */
-
-static void rgrp_go_unlock(struct gfs2_holder *gh)
-{
-	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
-}
-
-/**
- * trans_go_sync - promote/demote the transaction glock
+ * freeze_go_sync - promote/demote the freeze glock
  * @gl: the glock
  * @state: the requested state
  * @flags:
  *
  */
 
-static void trans_go_sync(struct gfs2_glock *gl)
+static void freeze_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
+	DEFINE_WAIT(wait);
 
-	if (gl->gl_state != LM_ST_UNLOCKED &&
+	if (gl->gl_state == LM_ST_SHARED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
-		flush_workqueue(gfs2_delete_workqueue);
-		gfs2_meta_syncfs(sdp);
-		gfs2_log_shutdown(sdp);
+		atomic_set(&sdp->sd_log_freeze, 1);
+		wake_up(&sdp->sd_logd_waitq);
+		do {
+			prepare_to_wait(&sdp->sd_log_frozen_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (atomic_read(&sdp->sd_log_freeze))
+				io_schedule();
+		} while(atomic_read(&sdp->sd_log_freeze));
+		finish_wait(&sdp->sd_log_frozen_wait, &wait);
 	}
 }
 
 /**
- * trans_go_xmote_bh - After promoting/demoting the transaction glock
+ * freeze_go_xmote_bh - After promoting/demoting the freeze glock
  * @gl: the glock
  *
  */
 
-static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
+static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -370,7 +525,7 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
  * Always returns 0
  */
 
-static int trans_go_demote_ok(const struct gfs2_glock *gl)
+static int freeze_go_demote_ok(const struct gfs2_glock *gl)
 {
 	return 0;
 }
@@ -381,15 +536,19 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
  *
  * gl_spin lock is held while calling this
  */
-static void iopen_go_callback(struct gfs2_glock *gl)
+static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
 	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+
+	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
+		return;
 
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
-		gfs2_glock_hold(gl);
+		gl->gl_lockref.count++;
 		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 	}
 }
 
@@ -398,32 +557,29 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
-	.go_xmote_th = inode_go_sync,
+	.go_sync = inode_go_sync,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
 	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_min_hold_time = HZ / 5,
 	.go_flags = GLOF_ASPACE,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
-	.go_xmote_th = rgrp_go_sync,
+	.go_sync = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
-	.go_demote_ok = rgrp_go_demote_ok,
-	.go_lock = rgrp_go_lock,
-	.go_unlock = rgrp_go_unlock,
+	.go_lock = gfs2_rgrp_go_lock,
+	.go_unlock = gfs2_rgrp_go_unlock,
 	.go_dump = gfs2_rgrp_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_min_hold_time = HZ / 5,
-	.go_flags = GLOF_ASPACE,
+	.go_flags = GLOF_LVB,
};
 
-const struct gfs2_glock_operations gfs2_trans_glops = {
-	.go_xmote_th = trans_go_sync,
-	.go_xmote_bh = trans_go_xmote_bh,
-	.go_demote_ok = trans_go_demote_ok,
+const struct gfs2_glock_operations gfs2_freeze_glops = {
+	.go_sync = freeze_go_sync,
+	.go_xmote_bh = freeze_go_xmote_bh,
+	.go_demote_ok = freeze_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
 };
 
@@ -442,6 +598,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
+	.go_flags = GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
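
Note on the transaction sizing in gfs2_ail_flush() above: revokes are logged as packed u64 block numbers, where the first log block carries a gfs2_log_descriptor header and each continuation block only a gfs2_meta_header, so the cap grows one block's worth at a time until it covers the outstanding revokes. The userspace sketch below works through that arithmetic; the block size and header sizes are assumed values for illustration, not the real on-disk sizes.

#include <stdio.h>
#include <stdint.h>

#define BSIZE          4096u /* assumed filesystem block size */
#define LOG_DESC_SIZE    72u /* assumed sizeof(struct gfs2_log_descriptor) */
#define META_HDR_SIZE    24u /* assumed sizeof(struct gfs2_meta_header) */

int main(void)
{
	unsigned int revokes = 2000;	/* pretend value of gl_ail_count */
	/* Capacity of the first (descriptor) log block */
	unsigned int max_revokes = (BSIZE - LOG_DESC_SIZE) / sizeof(uint64_t);

	/* Grow the cap one continuation block at a time until it covers
	 * the outstanding revokes, mirroring the loop in the patch. */
	while (revokes > max_revokes)
		max_revokes += (BSIZE - META_HDR_SIZE) / sizeof(uint64_t);

	/* With these assumed sizes: 503 entries in block one, then +509
	 * per extra block, so 2000 revokes give a cap of 503 + 3*509 = 2030. */
	printf("revokes=%u -> max_revokes=%u\n", revokes, max_revokes);
	return 0;
}

The resulting cap is what the patch hands to gfs2_trans_begin(sdp, 0, max_revokes) before calling __gfs2_ail_flush().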

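The glock operation tables at the end of the diff (gfs2_inode_glops, gfs2_rgrp_glops, gfs2_freeze_glops, and so on) follow the kernel's usual const ops-table pattern: each lock type fills in only the callbacks it needs, and the caller dispatches through the table, skipping entries left NULL. A minimal, self-contained userspace sketch of that pattern follows; all names are illustrative, not kernel code.

#include <stdio.h>

struct demo_glock;

/* Per-type callbacks; any member may be left NULL */
struct demo_glock_operations {
	void (*go_sync)(struct demo_glock *gl);   /* flush dirty state */
	void (*go_inval)(struct demo_glock *gl);  /* drop cached state */
};

struct demo_glock {
	const char *name;
	const struct demo_glock_operations *ops;
};

static void inode_sync(struct demo_glock *gl)
{
	printf("%s: writing back cached data\n", gl->name);
}

static const struct demo_glock_operations demo_inode_glops = {
	.go_sync = inode_sync,
	/* .go_inval left NULL: nothing to invalidate in this demo */
};

/* Core demotion path: sync then invalidate, if the type provides hooks */
static void demote(struct demo_glock *gl)
{
	if (gl->ops->go_sync)
		gl->ops->go_sync(gl);
	if (gl->ops->go_inval)
		gl->ops->go_inval(gl);
}

int main(void)
{
	struct demo_glock gl = { .name = "inode glock", .ops = &demo_inode_glops };
	demote(&gl);
	return 0;
}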