Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 111
1 file changed, 85 insertions, 26 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c99e4728ff4..51ba1db4b3e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -21,6 +21,7 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>

 #define DM_MSG_PREFIX "core"

@@ -31,6 +32,7 @@ static unsigned int _major = 0;

 static DEFINE_SPINLOCK(_minor_lock);
 /*
+ * For bio-based dm.
  * One of these is allocated per bio.
  */
 struct dm_io {
@@ -42,6 +44,7 @@ struct dm_io {
 };

 /*
+ * For bio-based dm.
  * One of these is allocated per target within a bio. Hopefully
  * this will be simplified out one day.
  */
@@ -51,6 +54,29 @@ struct dm_target_io {
 	union map_info info;
 };

+DEFINE_TRACE(block_bio_complete);
+
+/*
+ * For request-based dm.
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+	struct mapped_device *md;
+	struct dm_target *ti;
+	struct request *orig, clone;
+	int error;
+	union map_info info;
+};
+
+/*
+ * For request-based dm.
+ * One of these is allocated per bio.
+ */
+struct dm_rq_clone_bio_info {
+	struct bio *orig;
+	struct request *rq;
+};
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
 	if (bio && bio->bi_private)
@@ -141,11 +167,16 @@ struct mapped_device {

 	/* forced geometry settings */
 	struct hd_geometry geometry;
+
+	/* sysfs handle */
+	struct kobject kobj;
 };

 #define MIN_IOS 256
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
+static struct kmem_cache *_rq_tio_cache;
+static struct kmem_cache *_rq_bio_info_cache;

 static int __init local_init(void)
 {
@@ -161,9 +192,17 @@ static int __init local_init(void)
 	if (!_tio_cache)
 		goto out_free_io_cache;

+	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
+	if (!_rq_tio_cache)
+		goto out_free_tio_cache;
+
+	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
+	if (!_rq_bio_info_cache)
+		goto out_free_rq_tio_cache;
+
 	r = dm_uevent_init();
 	if (r)
-		goto out_free_tio_cache;
+		goto out_free_rq_bio_info_cache;

 	_major = major;
 	r = register_blkdev(_major, _name);
@@ -177,6 +216,10 @@ static int __init local_init(void)

 out_uevent_exit:
 	dm_uevent_exit();
+out_free_rq_bio_info_cache:
+	kmem_cache_destroy(_rq_bio_info_cache);
+out_free_rq_tio_cache:
+	kmem_cache_destroy(_rq_tio_cache);
 out_free_tio_cache:
 	kmem_cache_destroy(_tio_cache);
 out_free_io_cache:
@@ -187,6 +230,8 @@ out_free_io_cache:

 static void local_exit(void)
 {
+	kmem_cache_destroy(_rq_bio_info_cache);
+	kmem_cache_destroy(_rq_tio_cache);
 	kmem_cache_destroy(_tio_cache);
 	kmem_cache_destroy(_io_cache);
 	unregister_blkdev(_major, _name);
@@ -504,8 +549,7 @@ static void dec_pending(struct dm_io *io, int error)
 		end_io_acct(io);

 		if (io->error != DM_ENDIO_REQUEUE) {
-			blk_add_trace_bio(io->md->queue, io->bio,
-					  BLK_TA_COMPLETE);
+			trace_block_bio_complete(io->md->queue, io->bio);

 			bio_endio(io->bio, io->error);
 		}
@@ -598,7 +642,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */

-		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
 				    tio->io->bio->bi_bdev->bd_dev,
 				    clone->bi_sector, sector);

@@ -794,7 +838,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
 		return -EIO;
-
+	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
+		dm_table_put(ci.map);
+		bio_endio(bio, -EOPNOTSUPP);
+		return 0;
+	}
 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
@@ -878,15 +926,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	struct mapped_device *md = q->queuedata;
 	int cpu;

-	/*
-	 * There is no use in forwarding any barrier request since we can't
-	 * guarantee it is (or can be) handled by the targets correctly.
-	 */
-	if (unlikely(bio_barrier(bio))) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	down_read(&md->io_lock);

 	cpu = part_stat_lock();
@@ -941,8 +980,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 	struct mapped_device *md = congested_data;
 	struct dm_table *map;

-	atomic_inc(&md->pending);
-
 	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
 		map = dm_get_table(md);
 		if (map) {
@@ -951,10 +988,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 		}
 	}

-	if (!atomic_dec_return(&md->pending))
-		/* nudge anyone waiting on suspend queue */
-		wake_up(&md->wait);
-
 	return r;
 }

@@ -1091,7 +1124,7 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->tio_pool)
 		goto bad_tio_pool;

-	md->bs = bioset_create(16, 16);
+	md->bs = bioset_create(16, 0);
 	if (!md->bs)
 		goto bad_no_bioset;

@@ -1214,10 +1247,12 @@ static int __bind(struct mapped_device *md, struct dm_table *t)

 	if (md->suspended_bdev)
 		__set_size(md, size);
-	if (size == 0)
+
+	if (!size) {
+		dm_table_destroy(t);
 		return 0;
+	}

-	dm_table_get(t);
 	dm_table_event_callback(t, event_callback, md);

 	write_lock(&md->map_lock);
@@ -1239,7 +1274,7 @@ static void __unbind(struct mapped_device *md)
 	write_lock(&md->map_lock);
 	md->map = NULL;
 	write_unlock(&md->map_lock);
-	dm_table_put(map);
+	dm_table_destroy(map);
 }

 /*
@@ -1253,6 +1288,8 @@ int dm_create(int minor, struct mapped_device **result)
 	if (!md)
 		return -ENXIO;

+	dm_sysfs_init(md);
+
 	*result = md;
 	return 0;
 }
@@ -1328,8 +1365,9 @@ void dm_put(struct mapped_device *md)
 			dm_table_presuspend_targets(map);
 			dm_table_postsuspend_targets(map);
 		}
-		__unbind(md);
+		dm_sysfs_exit(md);
 		dm_table_put(map);
+		__unbind(md);
 		free_dev(md);
 	}
 }
@@ -1667,6 +1705,27 @@ struct gendisk *dm_disk(struct mapped_device *md)
 	return md->disk;
 }

+struct kobject *dm_kobject(struct mapped_device *md)
+{
+	return &md->kobj;
+}
+
+/*
+ * struct mapped_device should not be exported outside of dm.c
+ * so use this check to verify that kobj is part of md structure
+ */
+struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
+{
+	struct mapped_device *md;
+
+	md = container_of(kobj, struct mapped_device, kobj);
+	if (&md->kobj != kobj)
+		return NULL;
+
+	dm_get(md);
+	return md;
+}
+
 int dm_suspended(struct mapped_device *md)
 {
 	return test_bit(DMF_SUSPENDED, &md->flags);
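
Note: the new out_free_rq_bio_info_cache/out_free_rq_tio_cache labels extend local_init()'s goto-based error unwinding: each resource that is created successfully gets a label, and a later failure jumps to the label that tears down everything created so far, in reverse order. Below is a minimal standalone C sketch of that pattern; create_cache/destroy_cache and local_init_sketch are hypothetical stand-ins for KMEM_CACHE()/kmem_cache_destroy() and the real init path, not kernel APIs.

#include <stdlib.h>

static void *cache_a;
static void *cache_b;

/* Hypothetical stand-ins for KMEM_CACHE() and kmem_cache_destroy(). */
static void *create_cache(size_t size) { return malloc(size); }
static void destroy_cache(void *c) { free(c); }

static int local_init_sketch(void)
{
	int r = -1;			/* the kernel path uses -ENOMEM here */

	cache_a = create_cache(16);
	if (!cache_a)
		goto out;

	cache_b = create_cache(32);
	if (!cache_b)
		goto out_free_a;	/* undo only what already succeeded */

	return 0;

out_free_a:
	destroy_cache(cache_a);
out:
	return r;
}

int main(void)
{
	return local_init_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}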
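The dm_kobject()/dm_get_from_kobject() additions at the end of the diff embed a struct kobject inside struct mapped_device and recover the owning structure from the kobject pointer with container_of(). The following is a minimal userspace sketch of that embed-and-recover pattern; fake_kobject, fake_mapped_device and md_from_kobject are hypothetical illustrations standing in for the kernel's struct kobject, struct mapped_device and dm_get_from_kobject().

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_kobject {
	const char *name;
};

struct fake_mapped_device {
	int minor;
	struct fake_kobject kobj;	/* embedded, like md->kobj in the patch */
};

/* Mirrors dm_get_from_kobject(): map the embedded member back to its owner. */
static struct fake_mapped_device *md_from_kobject(struct fake_kobject *kobj)
{
	struct fake_mapped_device *md;

	md = container_of(kobj, struct fake_mapped_device, kobj);
	if (&md->kobj != kobj)		/* sanity check, as in the patch */
		return NULL;

	return md;
}

int main(void)
{
	struct fake_mapped_device dev = { .minor = 3, .kobj = { .name = "dm-3" } };

	/* Sysfs code would only hold &dev.kobj; recover the device from it. */
	printf("minor=%d name=%s\n",
	       md_from_kobject(&dev.kobj)->minor, dev.kobj.name);
	return 0;
}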