diff options
Diffstat (limited to 'fs/xfs/xfs_buf.c')
| -rw-r--r-- | fs/xfs/xfs_buf.c | 132 | 
1 files changed, 66 insertions, 66 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 263470075ea..7a34a1ae655 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -34,12 +34,13 @@  #include <linux/backing-dev.h>  #include <linux/freezer.h> -#include "xfs_sb.h" +#include "xfs_log_format.h"  #include "xfs_trans_resv.h" -#include "xfs_log.h" +#include "xfs_sb.h"  #include "xfs_ag.h"  #include "xfs_mount.h"  #include "xfs_trace.h" +#include "xfs_log.h"  static kmem_zone_t *xfs_buf_zone; @@ -215,8 +216,7 @@ _xfs_buf_alloc(  STATIC int  _xfs_buf_get_pages(  	xfs_buf_t		*bp, -	int			page_count, -	xfs_buf_flags_t		flags) +	int			page_count)  {  	/* Make sure that we have a page list */  	if (bp->b_pages == NULL) { @@ -329,7 +329,7 @@ use_alloc_page:  	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)  								>> PAGE_SHIFT;  	page_count = end - start; -	error = _xfs_buf_get_pages(bp, page_count, flags); +	error = _xfs_buf_get_pages(bp, page_count);  	if (unlikely(error))  		return error; @@ -395,7 +395,17 @@ _xfs_buf_map_pages(  		bp->b_addr = NULL;  	} else {  		int retried = 0; +		unsigned noio_flag; +		/* +		 * vm_map_ram() will allocate auxillary structures (e.g. +		 * pagetables) with GFP_KERNEL, yet we are likely to be under +		 * GFP_NOFS context here. Hence we need to tell memory reclaim +		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent +		 * memory reclaim re-entering the filesystem here and +		 * potentially deadlocking. 
+		 */ +		noio_flag = memalloc_noio_save();  do {  			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,  						-1, PAGE_KERNEL); @@ -403,6 +413,7 @@ _xfs_buf_map_pages(  				break;  			vm_unmap_aliases();  		} while (retried++ <= 1); +		memalloc_noio_restore(noio_flag);  		if (!bp->b_addr)  			return -ENOMEM; @@ -444,8 +455,8 @@ _xfs_buf_find(  	numbytes = BBTOB(numblks);  	/* Check for IOs smaller than the sector size / not sector aligned */ -	ASSERT(!(numbytes < (1 << btp->bt_sshift))); -	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); +	ASSERT(!(numbytes < btp->bt_meta_sectorsize)); +	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));  	/*  	 * Corrupted block numbers can get through to here, unfortunately, so we @@ -590,7 +601,7 @@ found:  		error = _xfs_buf_map_pages(bp, flags);  		if (unlikely(error)) {  			xfs_warn(target->bt_mount, -				"%s: failed to map pages\n", __func__); +				"%s: failed to map pages", __func__);  			xfs_buf_relse(bp);  			return NULL;  		} @@ -697,7 +708,11 @@ xfs_buf_read_uncached(  	bp->b_flags |= XBF_READ;  	bp->b_ops = ops; -	xfsbdstrat(target->bt_mount, bp); +	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) { +		xfs_buf_relse(bp); +		return NULL; +	} +	xfs_buf_iorequest(bp);  	xfs_buf_iowait(bp);  	return bp;  } @@ -762,7 +777,7 @@ xfs_buf_associate_memory(  	bp->b_pages = NULL;  	bp->b_addr = mem; -	rval = _xfs_buf_get_pages(bp, page_count, 0); +	rval = _xfs_buf_get_pages(bp, page_count);  	if (rval)  		return rval; @@ -795,7 +810,7 @@ xfs_buf_get_uncached(  		goto fail;  	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; -	error = _xfs_buf_get_pages(bp, page_count, 0); +	error = _xfs_buf_get_pages(bp, page_count);  	if (error)  		goto fail_free_buf; @@ -809,7 +824,7 @@ xfs_buf_get_uncached(  	error = _xfs_buf_map_pages(bp, 0);  	if (unlikely(error)) {  		xfs_warn(target->bt_mount, -			"%s: failed to map pages\n", __func__); +			"%s: failed to map pages", __func__);  		goto fail_free_mem;  	} @@ 
-1088,7 +1103,7 @@ xfs_bioerror(   * This is meant for userdata errors; metadata bufs come with   * iodone functions attached, so that we can track down errors.   */ -STATIC int +int  xfs_bioerror_relse(  	struct xfs_buf	*bp)  { @@ -1151,7 +1166,7 @@ xfs_bwrite(  	ASSERT(xfs_buf_islocked(bp));  	bp->b_flags |= XBF_WRITE; -	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); +	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);  	xfs_bdstrat_cb(bp); @@ -1163,25 +1178,6 @@ xfs_bwrite(  	return error;  } -/* - * Wrapper around bdstrat so that we can stop data from going to disk in case - * we are shutting down the filesystem.  Typically user data goes thru this - * path; one of the exceptions is the superblock. - */ -void -xfsbdstrat( -	struct xfs_mount	*mp, -	struct xfs_buf		*bp) -{ -	if (XFS_FORCED_SHUTDOWN(mp)) { -		trace_xfs_bdstrat_shut(bp, _RET_IP_); -		xfs_bioerror_relse(bp); -		return; -	} - -	xfs_buf_iorequest(bp); -} -  STATIC void  _xfs_buf_ioend(  	xfs_buf_t		*bp, @@ -1254,7 +1250,7 @@ next_chunk:  	bio = bio_alloc(GFP_NOIO, nr_pages);  	bio->bi_bdev = bp->b_target->bt_bdev; -	bio->bi_sector = sector; +	bio->bi_iter.bi_sector = sector;  	bio->bi_end_io = xfs_buf_bio_end_io;  	bio->bi_private = bp; @@ -1276,7 +1272,7 @@ next_chunk:  		total_nr_pages--;  	} -	if (likely(bio->bi_size)) { +	if (likely(bio->bi_iter.bi_size)) {  		if (xfs_buf_is_vmapped(bp)) {  			flush_kernel_vmap_range(bp->b_addr,  						xfs_buf_vmap_len(bp)); @@ -1375,21 +1371,29 @@ xfs_buf_iorequest(  		xfs_buf_wait_unpin(bp);  	xfs_buf_hold(bp); -	/* Set the count to 1 initially, this will stop an I/O +	/* +	 * Set the count to 1 initially, this will stop an I/O  	 * completion callout which happens before we have started  	 * all the I/O from calling xfs_buf_ioend too early.  	 
*/  	atomic_set(&bp->b_io_remaining, 1);  	_xfs_buf_ioapply(bp); -	_xfs_buf_ioend(bp, 1); +	/* +	 * If _xfs_buf_ioapply failed, we'll get back here with +	 * only the reference we took above.  _xfs_buf_ioend will +	 * drop it to zero, so we'd better not queue it for later, +	 * or we'll free it before it's done. +	 */ +	_xfs_buf_ioend(bp, bp->b_error ? 0 : 1);  	xfs_buf_rele(bp);  }  /*   * Waits for I/O to complete on the buffer supplied.  It returns immediately if - * no I/O is pending or there is already a pending error on the buffer.  It - * returns the I/O error code, if any, or 0 if there was no error. + * no I/O is pending or there is already a pending error on the buffer, in which + * case nothing will ever complete.  It returns the I/O error code, if any, or + * 0 if there was no error.   */  int  xfs_buf_iowait( @@ -1515,6 +1519,12 @@ xfs_wait_buftarg(  			struct xfs_buf *bp;  			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);  			list_del_init(&bp->b_lru); +			if (bp->b_flags & XBF_WRITE_FAIL) { +				xfs_alert(btp->bt_mount, +"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n" +"Please run xfs_repair to determine the extent of the problem.", +					(long long)bp->b_bn); +			}  			xfs_buf_rele(bp);  		}  		if (loop++ != 0) @@ -1601,16 +1611,14 @@ xfs_free_buftarg(  	kmem_free(btp);  } -STATIC int -xfs_setsize_buftarg_flags( +int +xfs_setsize_buftarg(  	xfs_buftarg_t		*btp, -	unsigned int		blocksize, -	unsigned int		sectorsize, -	int			verbose) +	unsigned int		sectorsize)  { -	btp->bt_bsize = blocksize; -	btp->bt_sshift = ffs(sectorsize) - 1; -	btp->bt_smask = sectorsize - 1; +	/* Set up metadata sector size info */ +	btp->bt_meta_sectorsize = sectorsize; +	btp->bt_meta_sectormask = sectorsize - 1;  	if (set_blocksize(btp->bt_bdev, sectorsize)) {  		char name[BDEVNAME_SIZE]; @@ -1618,43 +1626,35 @@ xfs_setsize_buftarg_flags(  		bdevname(btp->bt_bdev, name);  		xfs_warn(btp->bt_mount, -			"Cannot set_blocksize to %u on device 
%s\n", +			"Cannot set_blocksize to %u on device %s",  			sectorsize, name);  		return EINVAL;  	} +	/* Set up device logical sector size mask */ +	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev); +	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1; +  	return 0;  }  /* - *	When allocating the initial buffer target we have not yet - *	read in the superblock, so don't know what sized sectors - *	are being used at this early stage.  Play safe. + * When allocating the initial buffer target we have not yet + * read in the superblock, so don't know what sized sectors + * are being used at this early stage.  Play safe.   */  STATIC int  xfs_setsize_buftarg_early(  	xfs_buftarg_t		*btp,  	struct block_device	*bdev)  { -	return xfs_setsize_buftarg_flags(btp, -			PAGE_SIZE, bdev_logical_block_size(bdev), 0); -} - -int -xfs_setsize_buftarg( -	xfs_buftarg_t		*btp, -	unsigned int		blocksize, -	unsigned int		sectorsize) -{ -	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1); +	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));  }  xfs_buftarg_t *  xfs_alloc_buftarg(  	struct xfs_mount	*mp, -	struct block_device	*bdev, -	int			external, -	const char		*fsname) +	struct block_device	*bdev)  {  	xfs_buftarg_t		*btp; @@ -1798,7 +1798,7 @@ __xfs_buf_delwri_submit(  	blk_start_plug(&plug);  	list_for_each_entry_safe(bp, n, io_list, b_list) { -		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); +		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);  		bp->b_flags |= XBF_WRITE;  		if (!wait) {  | 
