author | Christoph Hellwig <hch@lst.de> | 2010-08-07 18:20:39 +0200 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-08-07 18:20:39 +0200 |
commit | 7b6d91daee5cac6402186ff224c3af39d79f4a0e | |
tree | b1518cf0b6c301178e0a320f80610cd5b3aa7625 /drivers/md/dm.c | |
parent | 33659ebbae262228eef4e0fe990f393d1f0ed941 | |
block: unify flags for struct bio and struct request
Remove the current bio flags and reuse the request flags for the bio, too.
This makes it easier to trace the type of I/O from the filesystem
down to the block driver. There were two flags in the bio that were
missing from the request flags: BIO_RW_UNPLUG and BIO_RW_AHEAD. I have
also renamed two request flags that had a superfluous RW in them.

Note that the flags live in bio.h despite having the REQ_ name: since
blkdev.h includes bio.h, that is the only way to go for now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
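As a quick orientation before the diff: after this change a driver tests the type of a bio by masking bio->bi_rw with the shared REQ_* flags instead of calling the old bio_rw_flagged() helper. The sketch below is illustrative only and not part of the patch; the helper name dm_bio_is_barrier is made up here.

```c
#include <linux/bio.h>

/*
 * Illustrative only: dm_bio_is_barrier is a hypothetical helper.
 * After this patch bio->bi_rw carries the shared REQ_* flags, so a
 * flag test is a plain bitwise AND instead of the old
 * bio_rw_flagged()/BIO_RW_* helpers.
 */
static inline bool dm_bio_is_barrier(struct bio *bio)
{
	/* old style: return bio_rw_flagged(bio, BIO_RW_BARRIER); */
	return (bio->bi_rw & REQ_HARDBARRIER) != 0;
}
```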
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd5150..d6f77baeafd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
 		 */
 		spin_lock_irqsave(&md->deferred_lock, flags);
 		if (__noflush_suspending(md)) {
-			if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+			if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 				bio_list_add_head(&md->deferred, io->bio);
 		} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
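One detail the hunks above show but do not spell out: the old BIO_RW_* constants were bit numbers, so callers had to shift them (1 << BIO_RW_BARRIER), whereas the unified REQ_* constants are ready-made bit masks. A minimal sketch of the clone-flag handling under that reading; dm_clone_clear_barrier is a hypothetical name, not a function in the patch:

```c
#include <linux/bio.h>

/*
 * Hypothetical helper, for illustration only: strip the barrier flag
 * from a cloned bio.  REQ_HARDBARRIER is already a mask, so no shift
 * is needed (the old code masked with ~(1 << BIO_RW_BARRIER)).
 */
static inline void dm_clone_clear_barrier(struct bio *clone)
{
	clone->bi_rw &= ~REQ_HARDBARRIER;
}
```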