Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--	block/blk-lib.c | 34 ++++++++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d6f50d57256..8411be3c19d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	sector_t max_discard_sectors;
-	sector_t granularity, alignment;
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -58,16 +58,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = bdev_discard_alignment(bdev) >> 9;
-	alignment = sector_div(alignment, granularity);
+	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	sector_div(max_discard_sectors, granularity);
-	max_discard_sectors *= granularity;
+	max_discard_sectors -= max_discard_sectors % granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
 	}
@@ -110,17 +108,25 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		bio->bi_size = req_sects << 9;
+		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
+
+		/*
+		 * We can loop for a long time in here, if someone does
+		 * full device discards (like mkfs). Be nice and allow
+		 * us to schedule out to avoid softlocking if preempt
+		 * is disabled.
+		 */
+		cond_resched();
 	}
 
 	blk_finish_plug(&plug);
@@ -176,7 +182,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
@@ -186,11 +192,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
 		if (nr_sects > max_write_same_sectors) {
-			bio->bi_size = max_write_same_sectors << 9;
+			bio->bi_iter.bi_size = max_write_same_sectors << 9;
 			nr_sects -= max_write_same_sectors;
 			sector += max_write_same_sectors;
 		} else {
-			bio->bi_size = nr_sects << 9;
+			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
 
@@ -220,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same);
  *  Generate and issue number of bios with zerofiled pages.
  */
 
-int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-			sector_t nr_sects, gfp_t gfp_mask)
+static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
 	struct bio *bio;
@@ -242,7 +248,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_private = &bb;
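Note on the arithmetic change in the first two hunks: once max_discard_sectors and
granularity are narrowed from sector_t (which may be 64-bit) to 32-bit unsigned int,
the sector_div() helper, needed because 64-bit division is not native on 32-bit
architectures, can be replaced by plain C '%' operators. The following is a minimal
userspace sketch of that round-down logic, with made-up values; it is an illustration
only, not kernel code:

#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, all in 512-byte sectors. */
	unsigned int granularity = 8;            /* discard granularity */
	unsigned int max_discard_sectors = 4196; /* raw queue limit */
	unsigned int discard_alignment = 12;     /* device discard alignment */

	/* Offset of the alignment within one granule (was two sector_div() steps). */
	int alignment = discard_alignment % granularity;

	/*
	 * Round the per-bio limit down to a whole number of granules, so
	 * requests stay aligned after a split (was a sector_div() followed
	 * by a multiply).
	 */
	max_discard_sectors -= max_discard_sectors % granularity;

	printf("alignment = %d\n", alignment);                     /* prints 4 */
	printf("max_discard_sectors = %u\n", max_discard_sectors); /* prints 4192 */
	return 0;
}

Rounding max_discard_sectors down to a multiple of granularity is what keeps each
split bio on granule boundaries; it can also round the limit down to zero, which is
why the !max_discard_sectors check immediately after guards against the infinite
loop noted in the code comment.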
