-rw-r--r--   block/elevator.c                  17
-rw-r--r--   block/ll_rw_blk.c                270
-rw-r--r--   drivers/block/pktcdvd.c            7
-rw-r--r--   drivers/block/ps3disk.c           21
-rw-r--r--   drivers/ide/ide-disk.c            29
-rw-r--r--   drivers/ide/ide-io.c              35
-rw-r--r--   drivers/md/dm-table.c             28
-rw-r--r--   drivers/md/dm.c                   16
-rw-r--r--   drivers/md/dm.h                    1
-rw-r--r--   drivers/md/linear.c               20
-rw-r--r--   drivers/md/md.c                    1
-rw-r--r--   drivers/md/multipath.c            30
-rw-r--r--   drivers/md/raid0.c                21
-rw-r--r--   drivers/md/raid1.c                31
-rw-r--r--   drivers/md/raid10.c               31
-rw-r--r--   drivers/md/raid5.c                31
-rw-r--r--   drivers/message/i2o/i2o_block.c   24
-rw-r--r--   drivers/scsi/sd.c                 22
-rw-r--r--   fs/bio.c                          23
-rw-r--r--   include/linux/bio.h               19
-rw-r--r--   include/linux/blkdev.h             8
-rw-r--r--   include/linux/ide.h                5
-rw-r--r--   mm/bounce.c                        6
23 files changed, 240 insertions(+), 456 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index b9c518afe1f..952aee04a68 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
+ /*
+ * Kill the empty barrier place holder, the driver must
+ * not ever see it.
+ */
+ if (blk_empty_barrier(rq)) {
+ end_queued_request(rq, 1);
+ continue;
+ }
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
@@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
- int nr_bytes = rq->hard_nr_sectors << 9;
-
- if (!nr_bytes)
- nr_bytes = rq->data_len;
-
- blkdev_dequeue_request(rq);
rq->cmd_flags |= REQ_QUIET;
- end_that_request_chunk(rq, 0, nr_bytes);
- end_that_request_last(rq, 0);
+ end_queued_request(rq, 0);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
ret);
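Both new elevator branches lean on end_queued_request(); a minimal sketch of the pattern, not part of the patch itself:

	/*
	 * Illustrative sketch: failing a request that is still on the
	 * queue is now a single call; end_queued_request() dequeues it,
	 * completes its bios, and runs the final completion.
	 */
	rq->cmd_flags |= REQ_QUIET;	/* suppress error logging */
	end_queued_request(rq, 0);	/* uptodate == 0: error */
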
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d875673e76c..4df7d027eb0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,23 +304,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
EXPORT_SYMBOL(blk_queue_ordered);
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q: the request queue
- * @iff: the function to be called issuing the flush
- *
- * Description:
- * If a driver supports issuing a flush command, the support is notified
- * to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
- q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
/*
* Cache flushing for ordered writes handling
*/
@@ -377,10 +360,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
/*
* Okay, sequence complete.
*/
- rq = q->orig_bar_rq;
- uptodate = q->orderr ? q->orderr : 1;
+ uptodate = 1;
+ if (q->orderr)
+ uptodate = q->orderr;
q->ordseq = 0;
+ rq = q->orig_bar_rq;
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
@@ -445,7 +430,8 @@ static inline struct request *start_ordered(struct request_queue *q,
rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
- rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+ if (q->ordered & QUEUE_ORDERED_FUA)
+ rq->cmd_flags |= REQ_FUA;
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +441,12 @@ static inline struct request *start_ordered(struct request_queue *q,
* Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
- * request gets inbetween ordered sequence.
+ * request gets inbetween ordered sequence. If this request is
+ * an empty barrier, we don't need to do a postflush ever since
+ * there will be no data written between the pre and post flush.
+ * Hence a single flush will suffice.
*/
- if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+ if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
else
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
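The effect on the ordered sequence, sketched informally (not part of the patch):

	/*
	 * Informal sketch of the resulting sequences:
	 *
	 *   barrier with data: preflush -> barrier write -> postflush
	 *   empty barrier:     preflush -> barrier        (postflush skipped)
	 *
	 * Nothing is written between the two flushes of an empty barrier,
	 * so the preflush alone already covers it.
	 */
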
@@ -481,7 +470,7 @@ static inline struct request *start_ordered(struct request_queue *q,
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
- int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+ const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
if (!q->ordseq) {
if (!is_barrier)
@@ -2660,6 +2649,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
EXPORT_SYMBOL(blk_execute_rq);
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ complete(bio->bi_private);
+}
+
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
@@ -2672,7 +2669,10 @@ EXPORT_SYMBOL(blk_execute_rq);
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
+ struct bio *bio;
+ int ret;
if (bdev->bd_disk == NULL)
return -ENXIO;
@@ -2680,10 +2680,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
- if (!q->issue_flush_fn)
- return -EOPNOTSUPP;
- return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = bio_end_empty_barrier;
+ bio->bi_private = &wait;
+ bio->bi_bdev = bdev;
+ submit_bio(1 << BIO_RW_BARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be copied
+ * from rq->sector.
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+
+ ret = 0;
+ if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
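A usage sketch for the reworked interface; the caller context and variable names here are invented:

	sector_t bad_sector;	/* invented caller-side example */
	int err;

	err = blkdev_issue_flush(bdev, &bad_sector);
	if (err == -EIO)
		printk(KERN_WARNING "flush failed near sector %llu\n",
		       (unsigned long long)bad_sector);
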
@@ -3051,7 +3073,7 @@ static inline void blk_partition_remap(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
- if (bdev != bdev->bd_contains) {
+ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
const int rw = bio_data_dir(bio);
@@ -3117,6 +3139,35 @@ static inline int should_fail_request(struct bio *bio)
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+ sector_t maxsector;
+
+ if (!nr_sectors)
+ return 0;
+
+ /* Test device or partition size, when known. */
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ if (maxsector) {
+ sector_t sector = bio->bi_sector;
+
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+ /*
+ * This may well happen - the kernel calls bread()
+ * without checking the size of the device, e.g., when
+ * mounting a device.
+ */
+ handle_bad_sector(bio);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/**
* generic_make_request: hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
@@ -3144,27 +3195,14 @@ static inline int should_fail_request(struct bio *bio)
static inline void __generic_make_request(struct bio *bio)
{
struct request_queue *q;
- sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
dev_t old_dev;
might_sleep();
- /* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
- if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
- /*
- * This may well happen - the kernel calls bread()
- * without checking the size of the device, e.g., when
- * mounting a device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
/*
* Resolve the mapping until finished. (drivers are
@@ -3191,7 +3229,7 @@ end_io:
break;
}
- if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > q->max_hw_sectors)) {
printk("bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
@@ -3212,7 +3250,7 @@ end_io:
blk_partition_remap(bio);
if (old_sector != -1)
- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+ blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
old_sector);
blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3258,8 @@ end_io:
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
-
- if (maxsector < nr_sectors ||
- maxsector - nr_sectors < sector) {
- /*
- * This may well happen - partitions are not
- * checked to make sure they are within the size
- * of the whole device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
ret = q->make_request_fn(q, bio);
} while (ret);
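A worked example of the bio_check_eod() arithmetic, with invented numbers:

	/*
	 * Invented numbers: maxsector = 1000, a bio at sector = 992 with
	 * nr_sectors = 16.  maxsector - nr_sectors = 984 < 992, so the
	 * bio would span sectors 992..1007 on a device that ends at 999,
	 * and is rejected via handle_bad_sector().
	 */
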
@@ -3307,23 +3332,32 @@ void submit_bio(int rw, struct bio *bio)
{
int count = bio_sectors(bio);
- BIO_BUG_ON(!bio->bi_size);
- BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw |= rw;
- if (rw & WRITE) {
- count_vm_events(PGPGOUT, count);
- } else {
- task_io_account_read(bio->bi_size);
- count_vm_events(PGPGIN, count);
- }
- if (unlikely(block_dump)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
- current->comm, current->pid,
- (rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
- bdevname(bio->bi_bdev,b));
+ /*
+ * If it's a regular read/write or a barrier with data attached,
+ * go through the normal accounting stuff before submission.
+ */
+ if (!bio_empty_barrier(bio)) {
+
+ BIO_BUG_ON(!bio->bi_size);
+ BIO_BUG_ON(!bio->bi_io_vec);
+
+ if (rw & WRITE) {
+ count_vm_events(PGPGOUT, count);
+ } else {
+ task_io_account_read(bio->bi_size);
+ count_vm_events(PGPGIN, count);
+ }
+
+ if (unlikely(block_dump)) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+ current->comm, current->pid,
+ (rw & WRITE) ? "WRITE" : "READ",
+ (unsigned long long)bio->bi_sector,
+ bdevname(bio->bi_bdev,b));
+ }
}
generic_make_request(bio);
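The empty barrier that submit_bio() now special-cases is simply a zero-size bio with the barrier flag set; a sketch of building one, mirroring the new blkdev_issue_flush() above (completion function name invented):

	struct bio *bio = bio_alloc(GFP_NOIO, 0);	/* no data pages */

	bio->bi_bdev = bdev;
	bio->bi_end_io = my_flush_done;		/* invented completion fn */
	submit_bio(1 << BIO_RW_BARRIER, bio);
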
@@ -3399,6 +3433,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
while ((bio = req->bio) != NULL) {
int nbytes;
+ /*
+ * For an empty barrier request, the low level driver must
+ * store a potential error location in ->sector. We pass
+ * that back up in ->bi_sector.
+ */
+ if (blk_empty_barrier(req))
+ bio->bi_sector = req->sector;
+
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
@@ -3564,7 +3606,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
- * through requeueing. Theh actual completion happens out-of-order,
+ * through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
@@ -3627,15 +3669,83 @@ void end_that_request_last(struct request *req, int uptodate)
EXPORT_SYMBOL(end_that_request_last);
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+ unsigned int nr_bytes, int dequeue)
{
- if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
- add_disk_randomness(req->rq_disk);
- blkdev_dequeue_request(req);
- end_that_request_last(req, uptodate);
+ if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+ if (dequeue)
+ blkdev_dequeue_request(rq);
+ add_disk_randomness(rq->rq_disk);
+ end_that_request_last(rq, uptodate);
}
}
+static unsigned int rq_byte_size(struct request *rq)
+{
+ if (blk_fs_request(rq))
+ return rq->hard_nr_sectors << 9;
+
+ return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request, and removes it from the block layer queues.
+ * Not suitable for normal IO completion, unless the driver still has
+ * the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request. The request must already have been
+ * dequeued using blkdev_dequeue_request(), as is normally the case
+ * for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends I/O on the current segment of a request. If that is the only
+ * remaining segment, the request is also completed and freed.
+ *
+ * This is a remnant of how older block drivers handled IO completions.
+ * Modern drivers typically end IO on the full request in one go, unless
+ * they have a residual value to account for. For that case this function
+ * isn't really useful, unless the residual just happens to be the
+ * full current segment. In other words, don't use this function in new
+ * code. Either use end_request_completely(), or the
+ * end_that_request_chunk() (along with end_that_request_last()) for
+ * partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+ __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
EXPORT_SYMBOL(end_request);
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
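Which completion helper a driver calls now depends only on whether it has already dequeued the request; an illustrative sketch with an invented driver function:

	/*
	 * Invented example: most drivers dequeue a request before starting
	 * it, so their completion path uses end_dequeued_request().  A
	 * request still attached to the queue takes end_queued_request()
	 * instead, and end_request() remains for legacy per-segment code.
	 */
	static void mydrv_complete(struct request *rq, int error)
	{
		int uptodate = error ? error : 1;	/* <0 error, 1 ok */

		end_dequeued_request(rq, uptodate);
	}
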
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 540bf367698..a8130a4ad6d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
* Schedule reads for missing parts of the packet.
*/
for (f = 0; f < pkt->frames; f++) {
+ struct bio_vec *vec;
+
int p, offset;
if (written[f])
continue;
bio = pkt->r_bios[f];
+ vec = bio->bi_io_vec;
bio_init(bio);
bio->bi_max_vecs = 1;
bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
+ bio->bi_io_vec = vec;
+ bio->bi_destructor = pkt_bio_destructor;
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
+ pkt->w_bio->bi_io_vec = bvec;
+ pkt->w_bio->bi_destructor = pkt_bio_destructor;
for (f = 0; f < pkt->frames; f++)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
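Both pktcdvd hunks exist because bio_init() clears the whole bio, so fields the driver owns must be saved and restored around it; the pattern, sketched:

	struct bio_vec *vec = bio->bi_io_vec;	/* bio_init() will clear this */

	bio_init(bio);
	bio->bi_io_vec = vec;			/* restore driver-owned vec table */
	bio->bi_destructor = pkt_bio_destructor; /* and its destructor */
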
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 06d0552cf49..e354bfc070e 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd_type = REQ_TYPE_FLUSH;
}
-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
- sector_t *sector)
-{
- struct ps3_storage_device *dev = q->queuedata;
- struct request *req;
- int res;
-
- dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
- req = blk_get_request(q, WRITE, __GFP_WAIT);
- ps3disk_prepare_flush(q, req);
- res = blk_execute_rq(q, gendisk, req, 0);
- if (res)
- dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
- __func__, __LINE__, res);
- blk_put_request(req);
- return res;
-}
-
-
static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_hardsect_size(queue, dev->blk_size);
- blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4754769eda9..92177ca48b4 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
rq->buffer = rq->cmd;
}
-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- ide_drive_t *drive = q->queuedata;
- struct request *rq;
- int ret;
-
- if (!drive->wcache)
- return 0;
-
- rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
- idedisk_prepare_flush(q, rq);
-
- ret = blk_execute_rq(q, disk, rq, 0);
-
- /*
- * if we failed and caller wants error offset, get it
- */
- if (ret && error_sector)
- *error_sector = ide_get_error_location(drive, rq->cmd);
-
- blk_put_request(rq);
- return ret;
-}
-
/*
* This is tightly woven into the driver->do_special can not touch.
* DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
struct hd_driveid *id = drive->id;
unsigned ordered = QUEUE_ORDERED_NONE;
prepare_flush_fn *prep_fn = NULL;
- issue_flush_fn *issue_fn = NULL;
if (drive->wcache) {
unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
if (barrier) {
ordered = QUEUE_ORDERED_DRAIN_FLUSH;
prep_fn = idedisk_prepare_flush;
- issue_fn = idedisk_issue_flush;
}
} else
ordered = QUEUE_ORDERED_DRAIN;
blk_queue_ordered(drive->queue, ordered, prep_fn);
- blk_queue_issue_flush_fn(drive->queue, issue_fn);
}
static int write_cache(ide_drive_t *drive, int arg)
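After this change a driver advertises flush support with blk_queue_ordered() alone; a one-line sketch with an invented prepare function, since there is no longer a separate issue-flush hook to register:

	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
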
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4cece930114..f36ff5962af 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -322,41 +322,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
spin_unlock_irqrestore(&ide_lock, flags);
}
-/*
- * FIXME: probably move this somewhere else, name is bad too :)
- */
-u64 ide_get_error_location(ide_drive_t *drive, char *args)
-{
- u32 high, low;
- u8 hcyl, lcyl, sect;
- u64 sector;
-
- high = 0;
- hcyl = args[5];
- lcyl = args[4];
- sect = args[3];
-
- if (ide_id_has_flush_cache_ext(drive->id)) {
- low = (hcyl << 16) | (lcyl << 8) | sect;
- HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
- high = ide_read_24(drive);
- } else {
- u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
- if (cur & 0x40) {
- high = cur & 0xf;
- low = (hcyl << 16) | (lcyl << 8) | sect;
- } else {
- low = hcyl * drive->head * drive->sect;
- low += lcyl * drive->sect;
- low += sect - 1;
- }
- }
-
- sector = ((u64) high << 24) | low;
- return sector;
-}
-EXPORT_SYMBOL(ide_get_error_location);
-
/**
* ide_end_drive_cmd - end an explicit drive command
* @drive: command
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2bcde5798b5..fbe477bb2c6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
}
}
-int dm_table_flush_all(struct dm_table *t)
-{
- struct list_head *d, *devices = dm_table_get_devices(t);
- int ret = 0;
- unsigned i;
-
- for (i = 0; i < t->num_targets; i++)
- if (t->targets[i].type->flush)
- t->targets[i].type->flush(&t->targets[i]);
-
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- struct request_queue *q = bdev_get_queue(dd->bdev);
- int err;
-
- if (!q->issue_flush_fn)
- err = -EOPNOTSUPP;
- else
- err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
- if (!ret)
- ret = err;
- }
-
- return ret;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 167765c4774..d837d37f620 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return 0;
}
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
- int ret = -ENXIO;
-
- if (map) {
- ret = dm_table_flush_all(map);
- dm_table_put(map);
- }
-
- return ret;
-}
-
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
- md->queue->issue_flush_fn = dm_flush_all;
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
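Stacking drivers such as dm now see a flush as an empty barrier bio arriving through their make_request function like any other bio; a hypothetical remap sketch, with all names invented:

	static int stacked_make_request(struct request_queue *q, struct bio *bio)
	{
		struct my_stack *s = q->queuedata;	/* invented type */

		bio->bi_bdev = s->member_bdev;		/* redirect downward */
		generic_make_request(bio);		/* resubmit */
		return 0;
	}
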
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 462ee652a89..4b3faa45277 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
/*-----------------------------------------------------------------
* A registry of target types.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 550148770bb..56a11f6c127 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
}
}
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- linear_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- for (i=0; i < mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = conf->disks[i].rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
- mddev->queue->issue_flush_fn = linear_issue_flush;
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index acf1b81b47c..0dc563d76b3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->pers->stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
- mddev->queue->issue_flush_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
if (mddev->pers->sync_request)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index f2a63f394ad..b35731cceac 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]");
}
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- multipath_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
static int multipath_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = multipath_unplug;
- mddev->queue->issue_flush_fn = multipath_issue_flush;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ef0da2d8495..e79e1a538d4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
}
}
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
- int i, ret = 0;
-
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = devlist[i]->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
mddev->queue->unplug_fn = raid0_unplug;
- mddev->queue->issue_flush_fn = raid0_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6d03bea6fa5..0bcefad8241 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = raid1_unplug;
- mddev->queue->issue_flush_fn = raid1_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 25a96c42bdb..fc6607acb6e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t