Diffstat (limited to 'drivers/md/dm-raid1.c')
-rw-r--r--   drivers/md/dm-raid1.c   | 1667
1 file changed, 920 insertions(+), 747 deletions(-)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 2375709a392..7dfdb5c746d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1,591 +1,286 @@ /* * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ -#include "dm.h" -#include "dm-bio-list.h" -#include "dm-io.h" -#include "dm-log.h" -#include "kcopyd.h" +#include "dm-bio-record.h" -#include <linux/ctype.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/slab.h> -#include <linux/time.h> -#include <linux/vmalloc.h> #include <linux/workqueue.h> +#include <linux/device-mapper.h> +#include <linux/dm-io.h> +#include <linux/dm-dirty-log.h> +#include <linux/dm-kcopyd.h> +#include <linux/dm-region-hash.h> -static struct workqueue_struct *_kmirrord_wq; -static struct work_struct _kmirrord_work; +#define DM_MSG_PREFIX "raid1" -static inline void wake(void) -{ - queue_work(_kmirrord_wq, &_kmirrord_work); -} +#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ + +#define DM_RAID1_HANDLE_ERRORS 0x01 +#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) + +static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); /*----------------------------------------------------------------- - * Region hash - * - * The mirror splits itself up into discrete regions. Each - * region can be in one of three states: clean, dirty, - * nosync. There is no need to put clean regions in the hash. - * - * In addition to being present in the hash table a region _may_ - * be present on one of three lists. - * - * clean_regions: Regions on this list have no io pending to - * them, they are in sync, we are no longer interested in them, - * they are dull. rh_update_states() will remove them from the - * hash table. - * - * quiesced_regions: These regions have been spun down, ready - * for recovery. rh_recovery_start() will remove regions from - * this list and hand them to kmirrord, which will schedule the - * recovery io with kcopyd. - * - * recovered_regions: Regions that kcopyd has successfully - * recovered. rh_update_states() will now schedule any delayed - * io, up the recovery_count, and remove the region from the - * hash. - * - * There are 2 locks: - * A rw spin lock 'hash_lock' protects just the hash table, - * this is never held in write mode from interrupt context, - * which I believe means that we only have to disable irqs when - * doing a write lock. - * - * An ordinary spin lock 'region_lock' that protects the three - * lists in the region_hash, with the 'state', 'list' and - * 'bhs_delayed' fields of the regions. This is used from irq - * context, so all other uses will have to suspend local irqs. + * Mirror set structures. 
*---------------------------------------------------------------*/ -struct mirror_set; -struct region_hash { - struct mirror_set *ms; - uint32_t region_size; - unsigned region_shift; - - /* holds persistent region state */ - struct dirty_log *log; - - /* hash table */ - rwlock_t hash_lock; - mempool_t *region_pool; - unsigned int mask; - unsigned int nr_buckets; - struct list_head *buckets; - - spinlock_t region_lock; - struct semaphore recovery_count; - struct list_head clean_regions; - struct list_head quiesced_regions; - struct list_head recovered_regions; +enum dm_raid1_error { + DM_RAID1_WRITE_ERROR, + DM_RAID1_FLUSH_ERROR, + DM_RAID1_SYNC_ERROR, + DM_RAID1_READ_ERROR }; -enum { - RH_CLEAN, - RH_DIRTY, - RH_NOSYNC, - RH_RECOVERING +struct mirror { + struct mirror_set *ms; + atomic_t error_count; + unsigned long error_type; + struct dm_dev *dev; + sector_t offset; }; -struct region { - struct region_hash *rh; /* FIXME: can we get rid of this ? */ - region_t key; - int state; - - struct list_head hash_list; +struct mirror_set { + struct dm_target *ti; struct list_head list; - atomic_t pending; - struct bio_list delayed_bios; -}; - -/* - * Conversion fns - */ -static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) -{ - return bio->bi_sector >> rh->region_shift; -} - -static inline sector_t region_to_sector(struct region_hash *rh, region_t region) -{ - return region << rh->region_shift; -} - -/* FIXME move this */ -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); + uint64_t features; -static void *region_alloc(gfp_t gfp_mask, void *pool_data) -{ - return kmalloc(sizeof(struct region), gfp_mask); -} + spinlock_t lock; /* protects the lists */ + struct bio_list reads; + struct bio_list writes; + struct bio_list failures; + struct bio_list holds; /* bios are waiting until suspend */ -static void region_free(void *element, void *pool_data) -{ - kfree(element); -} + struct dm_region_hash *rh; + struct dm_kcopyd_client *kcopyd_client; + struct dm_io_client *io_client; -#define MIN_REGIONS 64 -#define MAX_RECOVERY 1 -static int rh_init(struct region_hash *rh, struct mirror_set *ms, - struct dirty_log *log, uint32_t region_size, - region_t nr_regions) -{ - unsigned int nr_buckets, max_buckets; - size_t i; + /* recovery */ + region_t nr_regions; + int in_sync; + int log_failure; + int leg_failure; + atomic_t suspend; - /* - * Calculate a suitable number of buckets for our hash - * table. 
- */ - max_buckets = nr_regions >> 6; - for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) - ; - nr_buckets >>= 1; - - rh->ms = ms; - rh->log = log; - rh->region_size = region_size; - rh->region_shift = ffs(region_size) - 1; - rwlock_init(&rh->hash_lock); - rh->mask = nr_buckets - 1; - rh->nr_buckets = nr_buckets; - - rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); - if (!rh->buckets) { - DMERR("unable to allocate region hash memory"); - return -ENOMEM; - } + atomic_t default_mirror; /* Default mirror */ - for (i = 0; i < nr_buckets; i++) - INIT_LIST_HEAD(rh->buckets + i); + struct workqueue_struct *kmirrord_wq; + struct work_struct kmirrord_work; + struct timer_list timer; + unsigned long timer_pending; - spin_lock_init(&rh->region_lock); - sema_init(&rh->recovery_count, 0); - INIT_LIST_HEAD(&rh->clean_regions); - INIT_LIST_HEAD(&rh->quiesced_regions); - INIT_LIST_HEAD(&rh->recovered_regions); + struct work_struct trigger_event; - rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, - region_free, NULL); - if (!rh->region_pool) { - vfree(rh->buckets); - rh->buckets = NULL; - return -ENOMEM; - } + unsigned nr_mirrors; + struct mirror mirror[0]; +}; - return 0; -} +DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle, + "A percentage of time allocated for raid resynchronization"); -static void rh_exit(struct region_hash *rh) +static void wakeup_mirrord(void *context) { - unsigned int h; - struct region *reg, *nreg; - - BUG_ON(!list_empty(&rh->quiesced_regions)); - for (h = 0; h < rh->nr_buckets; h++) { - list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { - BUG_ON(atomic_read(®->pending)); - mempool_free(reg, rh->region_pool); - } - } - - if (rh->log) - dm_destroy_dirty_log(rh->log); - if (rh->region_pool) - mempool_destroy(rh->region_pool); - vfree(rh->buckets); -} + struct mirror_set *ms = context; -#define RH_HASH_MULT 2654435387U - -static inline unsigned int rh_hash(struct region_hash *rh, region_t region) -{ - return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; + queue_work(ms->kmirrord_wq, &ms->kmirrord_work); } -static struct region *__rh_lookup(struct region_hash *rh, region_t region) +static void delayed_wake_fn(unsigned long data) { - struct region *reg; - - list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) - if (reg->key == region) - return reg; - - return NULL; -} + struct mirror_set *ms = (struct mirror_set *) data; -static void __rh_insert(struct region_hash *rh, struct region *reg) -{ - unsigned int h = rh_hash(rh, reg->key); - list_add(®->hash_list, rh->buckets + h); + clear_bit(0, &ms->timer_pending); + wakeup_mirrord(ms); } -static struct region *__rh_alloc(struct region_hash *rh, region_t region) +static void delayed_wake(struct mirror_set *ms) { - struct region *reg, *nreg; - - read_unlock(&rh->hash_lock); - nreg = mempool_alloc(rh->region_pool, GFP_NOIO); - nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 
- RH_CLEAN : RH_NOSYNC; - nreg->rh = rh; - nreg->key = region; - - INIT_LIST_HEAD(&nreg->list); - - atomic_set(&nreg->pending, 0); - bio_list_init(&nreg->delayed_bios); - write_lock_irq(&rh->hash_lock); - - reg = __rh_lookup(rh, region); - if (reg) - /* we lost the race */ - mempool_free(nreg, rh->region_pool); - - else { - __rh_insert(rh, nreg); - if (nreg->state == RH_CLEAN) { - spin_lock(&rh->region_lock); - list_add(&nreg->list, &rh->clean_regions); - spin_unlock(&rh->region_lock); - } - reg = nreg; - } - write_unlock_irq(&rh->hash_lock); - read_lock(&rh->hash_lock); + if (test_and_set_bit(0, &ms->timer_pending)) + return; - return reg; + ms->timer.expires = jiffies + HZ / 5; + ms->timer.data = (unsigned long) ms; + ms->timer.function = delayed_wake_fn; + add_timer(&ms->timer); } -static inline struct region *__rh_find(struct region_hash *rh, region_t region) +static void wakeup_all_recovery_waiters(void *context) { - struct region *reg; - - reg = __rh_lookup(rh, region); - if (!reg) - reg = __rh_alloc(rh, region); - - return reg; + wake_up_all(&_kmirrord_recovery_stopped); } -static int rh_state(struct region_hash *rh, region_t region, int may_block) +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) { - int r; - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); - - if (reg) - return reg->state; - - /* - * The region wasn't in the hash, so we fall back to the - * dirty log. - */ - r = rh->log->type->in_sync(rh->log, region, may_block); + unsigned long flags; + int should_wake = 0; + struct bio_list *bl; - /* - * Any error from the dirty log (eg. -EWOULDBLOCK) gets - * taken as a RH_NOSYNC - */ - return r == 1 ? RH_CLEAN : RH_NOSYNC; -} + bl = (rw == WRITE) ? &ms->writes : &ms->reads; + spin_lock_irqsave(&ms->lock, flags); + should_wake = !(bl->head); + bio_list_add(bl, bio); + spin_unlock_irqrestore(&ms->lock, flags); -static inline int rh_in_sync(struct region_hash *rh, - region_t region, int may_block) -{ - int state = rh_state(rh, region, may_block); - return state == RH_CLEAN || state == RH_DIRTY; + if (should_wake) + wakeup_mirrord(ms); } -static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) +static void dispatch_bios(void *context, struct bio_list *bio_list) { + struct mirror_set *ms = context; struct bio *bio; - while ((bio = bio_list_pop(bio_list))) { + while ((bio = bio_list_pop(bio_list))) queue_bio(ms, bio, WRITE); - } } -static void rh_update_states(struct region_hash *rh) -{ - struct region *reg, *next; - - LIST_HEAD(clean); - LIST_HEAD(recovered); - - /* - * Quickly grab the lists. - */ - write_lock_irq(&rh->hash_lock); - spin_lock(&rh->region_lock); - if (!list_empty(&rh->clean_regions)) { - list_splice(&rh->clean_regions, &clean); - INIT_LIST_HEAD(&rh->clean_regions); - - list_for_each_entry (reg, &clean, list) { - rh->log->type->clear_region(rh->log, reg->key); - list_del(®->hash_list); - } - } - - if (!list_empty(&rh->recovered_regions)) { - list_splice(&rh->recovered_regions, &recovered); - INIT_LIST_HEAD(&rh->recovered_regions); - - list_for_each_entry (reg, &recovered, list) - list_del(®->hash_list); - } - spin_unlock(&rh->region_lock); - write_unlock_irq(&rh->hash_lock); - - /* - * All the regions on the recovered and clean lists have - * now been pulled out of the system, so no need to do - * any more locking. 
- */ - list_for_each_entry_safe (reg, next, &recovered, list) { - rh->log->type->clear_region(rh->log, reg->key); - rh->log->type->complete_resync_work(rh->log, reg->key, 1); - dispatch_bios(rh->ms, ®->delayed_bios); - up(&rh->recovery_count); - mempool_free(reg, rh->region_pool); - } +struct dm_raid1_bio_record { + struct mirror *m; + /* if details->bi_bdev == NULL, details were not saved */ + struct dm_bio_details details; + region_t write_region; +}; - if (!list_empty(&recovered)) - rh->log->type->flush(rh->log); +/* + * Every mirror should look like this one. + */ +#define DEFAULT_MIRROR 0 - list_for_each_entry_safe (reg, next, &clean, list) - mempool_free(reg, rh->region_pool); +/* + * This is yucky. We squirrel the mirror struct away inside + * bi_next for read/write buffers. This is safe since the bh + * doesn't get submitted to the lower levels of block layer. + */ +static struct mirror *bio_get_m(struct bio *bio) +{ + return (struct mirror *) bio->bi_next; } -static void rh_inc(struct region_hash *rh, region_t region) +static void bio_set_m(struct bio *bio, struct mirror *m) { - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - - atomic_inc(®->pending); - - spin_lock_irq(&rh->region_lock); - if (reg->state == RH_CLEAN) { - rh->log->type->mark_region(rh->log, reg->key); - - reg->state = RH_DIRTY; - list_del_init(®->list); /* take off the clean list */ - } - spin_unlock_irq(&rh->region_lock); + bio->bi_next = (struct bio *) m; +} - read_unlock(&rh->hash_lock); +static struct mirror *get_default_mirror(struct mirror_set *ms) +{ + return &ms->mirror[atomic_read(&ms->default_mirror)]; } -static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) +static void set_default_mirror(struct mirror *m) { - struct bio *bio; + struct mirror_set *ms = m->ms; + struct mirror *m0 = &(ms->mirror[0]); - for (bio = bios->head; bio; bio = bio->bi_next) - rh_inc(rh, bio_to_region(rh, bio)); + atomic_set(&ms->default_mirror, m - m0); } -static void rh_dec(struct region_hash *rh, region_t region) +static struct mirror *get_valid_mirror(struct mirror_set *ms) { - unsigned long flags; - struct region *reg; - int should_wake = 0; - - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); + struct mirror *m; - if (atomic_dec_and_test(®->pending)) { - spin_lock_irqsave(&rh->region_lock, flags); - if (atomic_read(®->pending)) { /* check race */ - spin_unlock_irqrestore(&rh->region_lock, flags); - return; - } - if (reg->state == RH_RECOVERING) { - list_add_tail(®->list, &rh->quiesced_regions); - } else { - reg->state = RH_CLEAN; - list_add(®->list, &rh->clean_regions); - } - spin_unlock_irqrestore(&rh->region_lock, flags); - should_wake = 1; - } + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) + if (!atomic_read(&m->error_count)) + return m; - if (should_wake) - wake(); + return NULL; } -/* - * Starts quiescing a region in preparation for recovery. +/* fail_mirror + * @m: mirror device to fail + * @error_type: one of the enum's, DM_RAID1_*_ERROR + * + * If errors are being handled, record the type of + * error encountered for this device. If this type + * of error has already been recorded, we can return; + * otherwise, we must signal userspace by triggering + * an event. Additionally, if the device is the + * primary device, we must choose a new primary, but + * only if the mirror is in-sync. + * + * This function must not block. 
*/ -static int __rh_recovery_prepare(struct region_hash *rh) +static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) { - int r; - struct region *reg; - region_t region; + struct mirror_set *ms = m->ms; + struct mirror *new; - /* - * Ask the dirty log what's next. - */ - r = rh->log->type->get_resync_work(rh->log, ®ion); - if (r <= 0) - return r; + ms->leg_failure = 1; /* - * Get this region, and start it quiescing by setting the - * recovering flag. + * error_count is used for nothing more than a + * simple way to tell if a device has encountered + * errors. */ - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - read_unlock(&rh->hash_lock); + atomic_inc(&m->error_count); - spin_lock_irq(&rh->region_lock); - reg->state = RH_RECOVERING; - - /* Already quiesced ? */ - if (atomic_read(®->pending)) - list_del_init(®->list); - - else { - list_del_init(®->list); - list_add(®->list, &rh->quiesced_regions); - } - spin_unlock_irq(&rh->region_lock); - - return 1; -} + if (test_and_set_bit(error_type, &m->error_type)) + return; -static void rh_recovery_prepare(struct region_hash *rh) -{ - while (!down_trylock(&rh->recovery_count)) - if (__rh_recovery_prepare(rh) <= 0) { - up(&rh->recovery_count); - break; - } -} + if (!errors_handled(ms)) + return; -/* - * Returns any quiesced regions. - */ -static struct region *rh_recovery_start(struct region_hash *rh) -{ - struct region *reg = NULL; + if (m != get_default_mirror(ms)) + goto out; - spin_lock_irq(&rh->region_lock); - if (!list_empty(&rh->quiesced_regions)) { - reg = list_entry(rh->quiesced_regions.next, - struct region, list); - list_del_init(®->list); /* remove from the quiesced list */ + if (!ms->in_sync) { + /* + * Better to issue requests to same failing device + * than to risk returning corrupt data. + */ + DMERR("Primary mirror (%s) failed while out-of-sync: " + "Reads may fail.", m->dev->name); + goto out; } - spin_unlock_irq(&rh->region_lock); - - return reg; -} - -/* FIXME: success ignored for now */ -static void rh_recovery_end(struct region *reg, int success) -{ - struct region_hash *rh = reg->rh; - - spin_lock_irq(&rh->region_lock); - list_add(®->list, ®->rh->recovered_regions); - spin_unlock_irq(&rh->region_lock); - - wake(); -} -static void rh_flush(struct region_hash *rh) -{ - rh->log->type->flush(rh->log); -} - -static void rh_delay(struct region_hash *rh, struct bio *bio) -{ - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, bio_to_region(rh, bio)); - bio_list_add(®->delayed_bios, bio); - read_unlock(&rh->hash_lock); -} - -static void rh_stop_recovery(struct region_hash *rh) -{ - int i; + new = get_valid_mirror(ms); + if (new) + set_default_mirror(new); + else + DMWARN("All sides of mirror have failed."); - /* wait for any recovering regions */ - for (i = 0; i < MAX_RECOVERY; i++) - down(&rh->recovery_count); +out: + schedule_work(&ms->trigger_event); } -static void rh_start_recovery(struct region_hash *rh) +static int mirror_flush(struct dm_target *ti) { - int i; - - for (i = 0; i < MAX_RECOVERY; i++) - up(&rh->recovery_count); - - wake(); -} - -/*----------------------------------------------------------------- - * Mirror set structures. 
- *---------------------------------------------------------------*/ -struct mirror { - atomic_t error_count; - struct dm_dev *dev; - sector_t offset; -}; - -struct mirror_set { - struct dm_target *ti; - struct list_head list; - struct region_hash rh; - struct kcopyd_client *kcopyd_client; - - spinlock_t lock; /* protects the next two lists */ - struct bio_list reads; - struct bio_list writes; - - /* recovery */ - region_t nr_regions; - int in_sync; - - unsigned int nr_mirrors; - struct mirror mirror[0]; -}; + struct mirror_set *ms = ti->private; + unsigned long error_bits; -/* - * Every mirror should look like this one. - */ -#define DEFAULT_MIRROR 0 + unsigned int i; + struct dm_io_region io[ms->nr_mirrors]; + struct mirror *m; + struct dm_io_request io_req = { + .bi_rw = WRITE_FLUSH, + .mem.type = DM_IO_KMEM, + .mem.ptr.addr = NULL, + .client = ms->io_client, + }; + + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) { + io[i].bdev = m->dev->bdev; + io[i].sector = 0; + io[i].count = 0; + } -/* - * This is yucky. We squirrel the mirror_set struct away inside - * bi_next for write buffers. This is safe since the bh - * doesn't get submitted to the lower levels of block layer. - */ -static struct mirror_set *bio_get_ms(struct bio *bio) -{ - return (struct mirror_set *) bio->bi_next; -} + error_bits = -1; + dm_io(&io_req, ms->nr_mirrors, io, &error_bits); + if (unlikely(error_bits != 0)) { + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error_bits)) + fail_mirror(ms->mirror + i, + DM_RAID1_FLUSH_ERROR); + return -EIO; + } -static void bio_set_ms(struct bio *bio, struct mirror_set *ms) -{ - bio->bi_next = (struct bio *) ms; + return 0; } /*----------------------------------------------------------------- @@ -595,76 +290,104 @@ static void bio_set_ms(struct bio *bio, struct mirror_set *ms) * are in the no-sync state. We have to recover these by * recopying from the default mirror to all the others. *---------------------------------------------------------------*/ -static void recovery_complete(int read_err, unsigned int write_err, +static void recovery_complete(int read_err, unsigned long write_err, void *context) { - struct region *reg = (struct region *) context; + struct dm_region *reg = context; + struct mirror_set *ms = dm_rh_region_context(reg); + int m, bit = 0; + + if (read_err) { + /* Read error means the failure of default mirror. */ + DMERR_LIMIT("Unable to read primary mirror during recovery"); + fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR); + } + + if (write_err) { + DMERR_LIMIT("Write error during recovery (error = 0x%lx)", + write_err); + /* + * Bits correspond to devices (excluding default mirror). + * The default mirror cannot change during recovery. 
+ */ + for (m = 0; m < ms->nr_mirrors; m++) { + if (&ms->mirror[m] == get_default_mirror(ms)) + continue; + if (test_bit(bit, &write_err)) + fail_mirror(ms->mirror + m, + DM_RAID1_SYNC_ERROR); + bit++; + } + } - /* FIXME: better error handling */ - rh_recovery_end(reg, read_err || write_err); + dm_rh_recovery_end(reg, !(read_err || write_err)); } -static int recover(struct mirror_set *ms, struct region *reg) +static int recover(struct mirror_set *ms, struct dm_region *reg) { int r; - unsigned int i; - struct io_region from, to[KCOPYD_MAX_REGIONS], *dest; + unsigned i; + struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; struct mirror *m; unsigned long flags = 0; + region_t key = dm_rh_get_region_key(reg); + sector_t region_size = dm_rh_get_region_size(ms->rh); /* fill in the source */ - m = ms->mirror + DEFAULT_MIRROR; + m = get_default_mirror(ms); from.bdev = m->dev->bdev; - from.sector = m->offset + region_to_sector(reg->rh, reg->key); - if (reg->key == (ms->nr_regions - 1)) { + from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); + if (key == (ms->nr_regions - 1)) { /* * The final region may be smaller than * region_size. */ - from.count = ms->ti->len & (reg->rh->region_size - 1); + from.count = ms->ti->len & (region_size - 1); if (!from.count) - from.count = reg->rh->region_size; + from.count = region_size; } else - from.count = reg->rh->region_size; + from.count = region_size; /* fill in the destinations */ for (i = 0, dest = to; i < ms->nr_mirrors; i++) { - if (i == DEFAULT_MIRROR) + if (&ms->mirror[i] == get_default_mirror(ms)) continue; m = ms->mirror + i; dest->bdev = m->dev->bdev; - dest->sector = m->offset + region_to_sector(reg->rh, reg->key); + dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); dest->count = from.count; dest++; } /* hand to kcopyd */ - set_bit(KCOPYD_IGNORE_ERROR, &flags); - r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags, - recovery_complete, reg); + if (!errors_handled(ms)) + set_bit(DM_KCOPYD_IGNORE_ERROR, &flags); + + r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, + flags, recovery_complete, reg); return r; } static void do_recovery(struct mirror_set *ms) { + struct dm_region *reg; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); int r; - struct region *reg; - struct dirty_log *log = ms->rh.log; /* * Start quiescing some regions. */ - rh_recovery_prepare(&ms->rh); + dm_rh_recovery_prepare(ms->rh); /* * Copy any already quiesced regions. 
*/ - while ((reg = rh_recovery_start(&ms->rh))) { + while ((reg = dm_rh_recovery_start(ms->rh))) { r = recover(ms, reg); if (r) - rh_recovery_end(reg, 0); + dm_rh_recovery_end(reg, 0); } /* @@ -683,17 +406,143 @@ static void do_recovery(struct mirror_set *ms) *---------------------------------------------------------------*/ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) { - /* FIXME: add read balancing */ - return ms->mirror + DEFAULT_MIRROR; + struct mirror *m = get_default_mirror(ms); + + do { + if (likely(!atomic_read(&m->error_count))) + return m; + + if (m-- == ms->mirror) + m += ms->nr_mirrors; + } while (m != get_default_mirror(ms)); + + return NULL; +} + +static int default_ok(struct mirror *m) +{ + struct mirror *default_mirror = get_default_mirror(m->ms); + + return !atomic_read(&default_mirror->error_count); +} + +static int mirror_available(struct mirror_set *ms, struct bio *bio) +{ + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + region_t region = dm_rh_bio_to_region(ms->rh, bio); + + if (log->type->in_sync(log, region, 0)) + return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; + + return 0; } /* * remap a buffer to a particular mirror. */ -static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio) +static sector_t map_sector(struct mirror *m, struct bio *bio) +{ + if (unlikely(!bio->bi_iter.bi_size)) + return 0; + return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); +} + +static void map_bio(struct mirror *m, struct bio *bio) { bio->bi_bdev = m->dev->bdev; - bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin); + bio->bi_iter.bi_sector = map_sector(m, bio); +} + +static void map_region(struct dm_io_region *io, struct mirror *m, + struct bio *bio) +{ + io->bdev = m->dev->bdev; + io->sector = map_sector(m, bio); + io->count = bio_sectors(bio); +} + +static void hold_bio(struct mirror_set *ms, struct bio *bio) +{ + /* + * Lock is required to avoid race condition during suspend + * process. + */ + spin_lock_irq(&ms->lock); + + if (atomic_read(&ms->suspend)) { + spin_unlock_irq(&ms->lock); + + /* + * If device is suspended, complete the bio. + */ + if (dm_noflush_suspending(ms->ti)) + bio_endio(bio, DM_ENDIO_REQUEUE); + else + bio_endio(bio, -EIO); + return; + } + + /* + * Hold bio until the suspend is complete. + */ + bio_list_add(&ms->holds, bio); + spin_unlock_irq(&ms->lock); +} + +/*----------------------------------------------------------------- + * Reads + *---------------------------------------------------------------*/ +static void read_callback(unsigned long error, void *context) +{ + struct bio *bio = context; + struct mirror *m; + + m = bio_get_m(bio); + bio_set_m(bio, NULL); + + if (likely(!error)) { + bio_endio(bio, 0); + return; + } + + fail_mirror(m, DM_RAID1_READ_ERROR); + + if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { + DMWARN_LIMIT("Read failure on mirror device %s. " + "Trying alternative device.", + m->dev->name); + queue_bio(m->ms, bio, bio_rw(bio)); + return; + } + + DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", + m->dev->name); + bio_endio(bio, -EIO); +} + +/* Asynchronous read. 
*/ +static void read_async_bio(struct mirror *m, struct bio *bio) +{ + struct dm_io_region io; + struct dm_io_request io_req = { + .bi_rw = READ, + .mem.type = DM_IO_BIO, + .mem.ptr.bio = bio, + .notify.fn = read_callback, + .notify.context = bio, + .client = m->ms->io_client, + }; + + map_region(&io, m, bio); + bio_set_m(bio, m); + BUG_ON(dm_io(&io_req, 1, &io, NULL)); +} + +static inline int region_in_sync(struct mirror_set *ms, region_t region, + int may_block) +{ + int state = dm_rh_get_state(ms->rh, region, may_block); + return state == DM_RH_CLEAN || state == DM_RH_DIRTY; } static void do_reads(struct mirror_set *ms, struct bio_list *reads) @@ -703,18 +552,21 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) struct mirror *m; while ((bio = bio_list_pop(reads))) { - region = bio_to_region(&ms->rh, bio); + region = dm_rh_bio_to_region(ms->rh, bio); + m = get_default_mirror(ms); /* * We can only read balance if the region is in sync. */ - if (rh_in_sync(&ms->rh, region, 0)) - m = choose_mirror(ms, bio->bi_sector); - else - m = ms->mirror + DEFAULT_MIRROR; + if (likely(region_in_sync(ms, region, 1))) + m = choose_mirror(ms, bio->bi_iter.bi_sector); + else if (m && atomic_read(&m->error_count)) + m = NULL; - map_bio(ms, m, bio); - generic_make_request(bio); + if (likely(m)) + read_async_bio(m, bio); + else + bio_endio(bio, -EIO); } } @@ -728,15 +580,18 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * RECOVERING: delay the io until recovery completes * NOSYNC: increment pending, just write to the default mirror *---------------------------------------------------------------*/ + + static void write_callback(unsigned long error, void *context) { - unsigned int i; - int uptodate = 1; + unsigned i, ret = 0; struct bio *bio = (struct bio *) context; struct mirror_set *ms; + int should_wake = 0; + unsigned long flags; - ms = bio_get_ms(bio); - bio_set_ms(bio, NULL); + ms = bio_get_m(bio)->ms; + bio_set_m(bio, NULL); /* * NOTE: We don't decrement the pending count here, @@ -744,40 +599,59 @@ static void write_callback(unsigned long error, void *context) * This way we handle both writes to SYNC and NOSYNC * regions with the same code. */ - - if (error) { - /* - * only error the io if all mirrors failed. - * FIXME: bogus - */ - uptodate = 0; - for (i = 0; i < ms->nr_mirrors; i++) - if (!test_bit(i, &error)) { - uptodate = 1; - break; - } + if (likely(!error)) { + bio_endio(bio, ret); + return; } - bio_endio(bio, bio->bi_size, 0); + + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error)) + fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); + + /* + * Need to raise event. Since raising + * events can block, we need to do it in + * the main thread. 
+ */ + spin_lock_irqsave(&ms->lock, flags); + if (!ms->failures.head) + should_wake = 1; + bio_list_add(&ms->failures, bio); + spin_unlock_irqrestore(&ms->lock, flags); + if (should_wake) + wakeup_mirrord(ms); } static void do_write(struct mirror_set *ms, struct bio *bio) { unsigned int i; - struct io_region io[KCOPYD_MAX_REGIONS+1]; + struct dm_io_region io[ms->nr_mirrors], *dest = io; struct mirror *m; + struct dm_io_request io_req = { + .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), + .mem.type = DM_IO_BIO, + .mem.ptr.bio = bio, + .notify.fn = write_callback, + .notify.context = bio, + .client = ms->io_client, + }; + + if (bio->bi_rw & REQ_DISCARD) { + io_req.bi_rw |= REQ_DISCARD; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = NULL; + } - for (i = 0; i < ms->nr_mirrors; i++) { - m = ms->mirror + i; + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) + map_region(dest++, m, bio); - io[i].bdev = m->dev->bdev; - io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin); - io[i].count = bio->bi_size >> 9; - } + /* + * Use default mirror because we only need it to retrieve the reference + * to the mirror set in write_callback(). + */ + bio_set_m(bio, get_default_mirror(ms)); - bio_set_ms(bio, ms); - dm_io_async_bvec(ms->nr_mirrors, io, WRITE, - bio->bi_io_vec + bio->bi_idx, - write_callback, bio); + BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); } static void do_writes(struct mirror_set *ms, struct bio_list *writes) @@ -785,6 +659,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) int state; struct bio *bio; struct bio_list sync, nosync, recover, *this_list = NULL; + struct bio_list requeue; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + region_t region; if (!writes->head) return; @@ -795,20 +672,35 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&sync); bio_list_init(&nosync); bio_list_init(&recover); + bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { - state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); + if ((bio->bi_rw & REQ_FLUSH) || + (bio->bi_rw & REQ_DISCARD)) { + bio_list_add(&sync, bio); + continue; + } + + region = dm_rh_bio_to_region(ms->rh, bio); + + if (log->type->is_remote_recovering && + log->type->is_remote_recovering(log, region)) { + bio_list_add(&requeue, bio); + continue; + } + + state = dm_rh_get_state(ms->rh, region, 1); switch (state) { - case RH_CLEAN: - case RH_DIRTY: + case DM_RH_CLEAN: + case DM_RH_DIRTY: this_list = &sync; break; - case RH_NOSYNC: + case DM_RH_NOSYNC: this_list = &nosync; break; - case RH_RECOVERING: + case DM_RH_RECOVERING: this_list = &recover; break; } @@ -817,60 +709,138 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) } /* + * Add bios that are delayed due to remote recovery + * back on to the write queue + */ + if (unlikely(requeue.head)) { + spin_lock_irq(&ms->lock); + bio_list_merge(&ms->writes, &requeue); + spin_unlock_irq(&ms->lock); + delayed_wake(ms); + } + + /* * Increment the pending counts for any regions that will * be written to (writes to recover regions are going to * be delayed). */ - rh_inc_pending(&ms->rh, &sync); - rh_inc_pending(&ms->rh, &nosync); - rh_flush(&ms->rh); + dm_rh_inc_pending(ms->rh, &sync); + dm_rh_inc_pending(ms->rh, &nosync); + + /* + * If the flush fails on a previous call and succeeds here, + * we must not reset the log_failure variable. We need + * userspace interaction to do that. + */ + ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure; /* * Dispatch io. 
*/ - while ((bio = bio_list_pop(&sync))) - do_write(ms, bio); + if (unlikely(ms->log_failure) && errors_handled(ms)) { + spin_lock_irq(&ms->lock); + bio_list_merge(&ms->failures, &sync); + spin_unlock_irq(&ms->lock); + wakeup_mirrord(ms); + } else + while ((bio = bio_list_pop(&sync))) + do_write(ms, bio); while ((bio = bio_list_pop(&recover))) - rh_delay(&ms->rh, bio); + dm_rh_delay(ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { - map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); - generic_make_request(bio); + if (unlikely(ms->leg_failure) && errors_handled(ms)) { + spin_lock_irq(&ms->lock); + bio_list_add(&ms->failures, bio); + spin_unlock_irq(&ms->lock); + wakeup_mirrord(ms); + } else { + map_bio(get_default_mirror(ms), bio); + generic_make_request(bio); + } + } +} + +static void do_failures(struct mirror_set *ms, struct bio_list *failures) +{ + struct bio *bio; + + if (likely(!failures->head)) + return; + + /* + * If the log has failed, unattempted writes are being + * put on the holds list. We can't issue those writes + * until a log has been marked, so we must store them. + * + * If a 'noflush' suspend is in progress, we can requeue + * the I/O's to the core. This give userspace a chance + * to reconfigure the mirror, at which point the core + * will reissue the writes. If the 'noflush' flag is + * not set, we have no choice but to return errors. + * + * Some writes on the failures list may have been + * submitted before the log failure and represent a + * failure to write to one of the devices. It is ok + * for us to treat them the same and requeue them + * as well. + */ + while ((bio = bio_list_pop(failures))) { + if (!ms->log_failure) { + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio); + } + + /* + * If all the legs are dead, fail the I/O. + * If we have been told to handle errors, hold the bio + * and wait for userspace to deal with the problem. + * Otherwise pretend that the I/O succeeded. (This would + * be wrong if the failed leg returned after reboot and + * got replicated back to the good legs.) 
+ */ + if (!get_valid_mirror(ms)) + bio_endio(bio, -EIO); + else if (errors_handled(ms)) + hold_bio(ms, bio); + else + bio_endio(bio, 0); } } +static void trigger_event(struct work_struct *work) +{ + struct mirror_set *ms = + container_of(work, struct mirror_set, trigger_event); + + dm_table_event(ms->ti->table); +} + /*----------------------------------------------------------------- * kmirrord *---------------------------------------------------------------*/ -static LIST_HEAD(_mirror_sets); -static DECLARE_RWSEM(_mirror_sets_lock); - -static void do_mirror(struct mirror_set *ms) +static void do_mirror(struct work_struct *work) { - struct bio_list reads, writes; + struct mirror_set *ms = container_of(work, struct mirror_set, + kmirrord_work); + struct bio_list reads, writes, failures; + unsigned long flags; - spin_lock(&ms->lock); + spin_lock_irqsave(&ms->lock, flags); reads = ms->reads; writes = ms->writes; + failures = ms->failures; bio_list_init(&ms->reads); bio_list_init(&ms->writes); - spin_unlock(&ms->lock); + bio_list_init(&ms->failures); + spin_unlock_irqrestore(&ms->lock, flags); - rh_update_states(&ms->rh); + dm_rh_update_states(ms->rh, errors_handled(ms)); do_recovery(ms); do_reads(ms, &reads); do_writes(ms, &writes); -} - -static void do_work(void *ignored) -{ - struct mirror_set *ms; - - down_read(&_mirror_sets_lock); - list_for_each_entry (ms, &_mirror_sets, list) - do_mirror(ms); - up_read(&_mirror_sets_lock); + do_failures(ms, &failures); } /*----------------------------------------------------------------- @@ -879,32 +849,48 @@ static void do_work(void *ignored) static struct mirror_set *alloc_context(unsigned int nr_mirrors, uint32_t region_size, struct dm_target *ti, - struct dirty_log *dl) + struct dm_dirty_log *dl) { size_t len; struct mirror_set *ms = NULL; - if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) - return NULL; - len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); - ms = kmalloc(len, GFP_KERNEL); + ms = kzalloc(len, GFP_KERNEL); if (!ms) { - ti->error = "dm-mirror: Cannot allocate mirror context"; + ti->error = "Cannot allocate mirror context"; return NULL; } - memset(ms, 0, len); spin_lock_init(&ms->lock); + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + bio_list_init(&ms->failures); + bio_list_init(&ms->holds); ms->ti = ti; ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; + ms->log_failure = 0; + ms->leg_failure = 0; + atomic_set(&ms->suspend, 0); + atomic_set(&ms->default_mirror, DEFAULT_MIRROR); + + ms->io_client = dm_io_client_create(); + if (IS_ERR(ms->io_client)) { + ti->error = "Error creating dm_io client"; + kfree(ms); + return NULL; + } - if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { - ti->error = "dm-mirror: Error creating dirty region hash"; + ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, + wakeup_all_recovery_waiters, + ms->ti->begin, MAX_RECOVERY, + dl, region_size, ms->nr_regions); + if (IS_ERR(ms->rh)) { + ti->error = "Error creating dirty region hash"; + dm_io_client_destroy(ms->io_client); kfree(ms); return NULL; } @@ -918,95 +904,110 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti, while (m--) dm_put_device(ti, ms->mirror[m].dev); - rh_exit(&ms->rh); + dm_io_client_destroy(ms->io_client); + dm_region_hash_destroy(ms->rh); kfree(ms); } -static inline int _check_region_size(struct dm_target *ti, uint32_t size) -{ - return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) || - size > ti->len); 
-} - static int get_mirror(struct mirror_set *ms, struct dm_target *ti, unsigned int mirror, char **argv) { - sector_t offset; + unsigned long long offset; + char dummy; - if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) { - ti->error = "dm-mirror: Invalid offset"; + if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) { + ti->error = "Invalid offset"; return -EINVAL; } - if (dm_get_device(ti, argv[0], offset, ti->len, - dm_table_get_mode(ti->table), + if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ms->mirror[mirror].dev)) { - ti->error = "dm-mirror: Device lookup failure"; + ti->error = "Device lookup failure"; return -ENXIO; } + ms->mirror[mirror].ms = ms; + atomic_set(&(ms->mirror[mirror].error_count), 0); + ms->mirror[mirror].error_type = 0; ms->mirror[mirror].offset = offset; return 0; } -static int add_mirror_set(struct mirror_set *ms) -{ - down_write(&_mirror_sets_lock); - list_add_tail(&ms->list, &_mirror_sets); - up_write(&_mirror_sets_lock); - wake(); - - return 0; -} - -static void del_mirror_set(struct mirror_set *ms) -{ - down_write(&_mirror_sets_lock); - list_del(&ms->list); - up_write(&_mirror_sets_lock); -} - /* * Create dirty log: log_type #log_params <log_params> */ -static struct dirty_log *create_dirty_log(struct dm_target *ti, - unsigned int argc, char **argv, - unsigned int *args_used) +static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, + unsigned argc, char **argv, + unsigned *args_used) { - unsigned int param_count; - struct dirty_log *dl; + unsigned param_count; + struct dm_dirty_log *dl; + char dummy; if (argc < 2) { - ti->error = "dm-mirror: Insufficient mirror log arguments"; + ti->error = "Insufficient mirror log arguments"; return NULL; } - if (sscanf(argv[1], "%u", ¶m_count) != 1) { - ti->error = "dm-mirror: Invalid mirror log argument count"; + if (sscanf(argv[1], "%u%c", ¶m_count, &dummy) != 1) { + ti->error = "Invalid mirror log argument count"; return NULL; } *args_used = 2 + param_count; if (argc < *args_used) { - ti->error = "dm-mirror: Insufficient mirror log arguments"; + ti->error = "Insufficient mirror log arguments"; return NULL; } - dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); + dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count, + argv + 2); if (!dl) { - ti->error = "dm-mirror: Error creating mirror dirty log"; + ti->error = "Error creating mirror dirty log"; return NULL; } - if (!_check_region_size(ti, dl->type->get_region_size(dl))) { - ti->error = "dm-mirror: Invalid region size"; - dm_destroy_dirty_log(dl); - return NULL; + return dl; +} + +static int parse_features(struct mirror_set *ms, unsigned argc, char **argv, + unsigned *args_used) +{ + unsigned num_features; + struct dm_target *ti = ms->ti; + char dummy; + + *args_used = 0; + + if (!argc) + return 0; + + if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) { + ti->error = "Invalid number of features"; + return -EINVAL; } - return dl; + argc--; + argv++; + (*args_used)++; + + if (num_features > argc) { + ti->error = "Not enough arguments to support feature count"; + return -EINVAL; + } + + if (!strcmp("handle_errors", argv[0])) + ms->features |= DM_RAID1_HANDLE_ERRORS; + else { + ti->error = "Unrecognised feature requested"; + return -EINVAL; + } + + (*args_used)++; + + return 0; } /* @@ -1014,17 +1015,20 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti, * * log_type #log_params <log_params> * #mirrors [mirror_path offset]{2,} + * [#features <features>] * * log_type is "core" or "disk" * #log_params is 
between 1 and 3 + * + * If present, features must be "handle_errors". */ -#define DM_IO_PAGES 64 static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) { int r; unsigned int nr_mirrors, m, args_used; struct mirror_set *ms; - struct dirty_log *dl; + struct dm_dirty_log *dl; + char dummy; dl = create_dirty_log(ti, argc, argv, &args_used); if (!dl) @@ -1033,24 +1037,24 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) argv += args_used; argc -= args_used; - if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || - nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) { - ti->error = "dm-mirror: Invalid number of mirrors"; - dm_destroy_dirty_log(dl); + if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 || + nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) { + ti->error = "Invalid number of mirrors"; + dm_dirty_log_destroy(dl); return -EINVAL; } argv++, argc--; - if (argc != nr_mirrors * 2) { - ti->error = "dm-mirror: Wrong number of mirror arguments"; - dm_destroy_dirty_log(dl); + if (argc < nr_mirrors * 2) { + ti->error = "Too few mirror arguments"; + dm_dirty_log_destroy(dl); return -EINVAL; } ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); if (!ms) { - dm_destroy_dirty_log(dl); + dm_dirty_log_destroy(dl); return -ENOMEM; } @@ -1066,205 +1070,374 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->private = ms; - ti->split_io = ms->rh.region_size; - r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); - if (r) { - free_context(ms, ti, ms->nr_mirrors); - return r; + r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh)); + if (r) + goto err_free_context; + + ti->num_flush_bios = 1; + ti->num_discard_bios = 1; + ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record); + ti->discard_zeroes_data_unsupported = true; + + ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); + if (!ms->kmirrord_wq) { + DMERR("couldn't start kmirrord"); + r = -ENOMEM; + goto err_free_context; } + INIT_WORK(&ms->kmirrord_work, do_mirror); + init_timer(&ms->timer); + ms->timer_pending = 0; + INIT_WORK(&ms->trigger_event, trigger_event); - add_mirror_set(ms); + r = parse_features(ms, argc, argv, &args_used); + if (r) + goto err_destroy_wq; + + argv += args_used; + argc -= args_used; + + /* + * Any read-balancing addition depends on the + * DM_RAID1_HANDLE_ERRORS flag being present. + * This is because the decision to balance depends + * on the sync state of a region. If the above + * flag is not present, we ignore errors; and + * the sync state may be inaccurate. 
+ */ + + if (argc) { + ti->error = "Too many mirror arguments"; + r = -EINVAL; + goto err_destroy_wq; + } + + ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); + if (IS_ERR(ms->kcopyd_client)) { + r = PTR_ERR(ms->kcopyd_client); + goto err_destroy_wq; + } + + wakeup_mirrord(ms); return 0; + +err_destroy_wq: + destroy_workqueue(ms->kmirrord_wq); +err_free_context: + free_context(ms, ti, ms->nr_mirrors); + return r; } static void mirror_dtr(struct dm_target *ti) { struct mirror_set *ms = (struct mirror_set *) ti->private; - del_mirror_set(ms); - kcopyd_client_destroy(ms->kcopyd_client); + del_timer_sync(&ms->timer); + flush_workqueue(ms->kmirrord_wq); + flush_work(&ms->trigger_event); + dm_kcopyd_client_destroy(ms->kcopyd_client); + destroy_workqueue(ms->kmirrord_wq); free_context(ms, ti, ms->nr_mirrors); } -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) -{ - int should_wake = 0; - struct bio_list *bl; - - bl = (rw == WRITE) ? &ms->writes : &ms->reads; - spin_lock(&ms->lock); - should_wake = !(bl->head); - bio_list_add(bl, bio); - spin_unlock(&ms->lock); - - if (should_wake) - wake(); -} - /* * Mirror mapping function */ -static int mirror_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int mirror_map(struct dm_target *ti, struct bio *bio) { int r, rw = bio_rw(bio); struct mirror *m; struct mirror_set *ms = ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + struct dm_raid1_bio_record *bio_record = + dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); - map_context->ll = bio->bi_sector >> ms->rh.region_shift; + bio_record->details.bi_bdev = NULL; if (rw == WRITE) { + /* Save region for mirror_end_io() handler */ + bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); queue_bio(ms, bio, rw); - return 0; + return DM_MAPIO_SUBMITTED; } - r = ms->rh.log->type->in_sync(ms->rh.log, - bio_to_region(&ms->rh, bio), 0); + r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); if (r < 0 && r != -EWOULDBLOCK) return r; - if (r == -EWOULDBLOCK) /* FIXME: ugly */ - r = 0; - /* - * We don't want to fast track a recovery just for a read - * ahead. So we just let it silently fail. - * FIXME: get rid of this. + * If region is not in-sync queue the bio. */ - if (!r && rw == READA) - return -EIO; + if (!r || (r == -EWOULDBLOCK)) { + if (rw == READA) + return -EWOULDBLOCK; - if (!r) { - /* Pass this io over to the daemon */ queue_bio(ms, bio, rw); - return 0; + return DM_MAPIO_SUBMITTED; } - m = choose_mirror(ms, bio->bi_sector); - if (!m) + /* + * The region is in-sync and we can perform reads directly. + * Store enough information so we can retry if it fails. + */ + m = choose_mirror(ms, bio->bi_iter.bi_sector); + if (unlikely(!m)) return -EIO; - map_bio(ms, m, bio); - return 1; + dm_bio_record(&bio_record->details, bio); + bio_record->m = m; + + map_bio(m, bio); + + return DM_MAPIO_REMAPPED; } -static int mirror_end_io(struct dm_target *ti, struct bio *bio, - int error, union map_info *map_context) +static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) { int rw = bio_rw(bio); struct mirror_set *ms = (struct mirror_set *) ti->private; - region_t region = map_context->ll; + struct mirror *m = NULL; + struct dm_bio_details *bd = NULL; + struct dm_raid1_bio_record *bio_record = + dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); /* * We need to dec pending if this was a write. 
*/ - if (rw == WRITE) - rh_dec(&ms->rh, region); + if (rw == WRITE) { + if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) + dm_rh_dec(ms->rh, bio_record->write_region); + return error; + } - return 0; + if (error == -EOPNOTSUPP) + goto out; + + if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) + goto out; + + if (unlikely(error)) { + if (!bio_record->details.bi_bdev) { + /* + * There wasn't enough memory to record necessary + * information for a retry or there was no other + * mirror in-sync. + */ + DMERR_LIMIT("Mirror read failed."); + return -EIO; + } + + m = bio_record->m; + + DMERR("Mirror read failed from %s. Trying alternative device.", + m->dev->name); + + fail_mirror(m, DM_RAID1_READ_ERROR); + + /* + * A failed read is requeued for another attempt using an intact + * mirror. + */ + if (default_ok(m) || mirror_available(ms, bio)) { + bd = &bio_record->details; + + dm_bio_restore(bd, bio); + bio_record->details.bi_bdev = NULL; + + atomic_inc(&bio->bi_remaining); + + queue_bio(ms, bio, rw); + return DM_ENDIO_INCOMPLETE; + } + DMERR("All replicated volumes dead, failing I/O"); + } + +out: + bio_record->details.bi_bdev = NULL; + + return error; } -static void mirror_postsuspend(struct dm_target *ti) +static void mirror_presuspend(struct dm_target *ti) { struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + struct bio_list holds; + struct bio *bio; + + atomic_set(&ms->suspend, 1); + + /* + * Process bios in the hold list to start recovery waiting + * for bios in the hold list. After the process, no bio has + * a chance to be added in the hold list because ms->suspend + * is set. + */ + spin_lock_irq(&ms->lock); + holds = ms->holds; + bio_list_init(&ms->holds); + spin_unlock_irq(&ms->lock); + + while ((bio = bio_list_pop(&holds))) + hold_bio(ms, bio); + + /* + * We must finish up all the work that we've + * generated (i.e. recovery work). + */ + dm_rh_stop_recovery(ms->rh); + + wait_event(_kmirrord_recovery_stopped, + !dm_rh_recovery_in_flight(ms->rh)); - rh_stop_recovery(&ms->rh); - if (log->type->suspend && log->type->suspend(log)) + if (log->type->presuspend && log->type->presuspend(log)) /* FIXME: need better error handling */ - DMWARN("log suspend failed"); + DMWARN("log presuspend failed"); + + /* + * Now that recovery is complete/stopped and the + * delayed bios are queued, we need to wait for + * the worker thread to complete. This way, + * we know that all of our I/O has been pushed. + */ + flush_workqueue(ms->kmirrord_wq); +} + +static void mirror_postsuspend(struct dm_target *ti) +{ + struct mirror_set *ms = ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + if (log->type->postsuspend && log->type->postsuspend(log)) + /* FIXME: need better error handling */ + DMWARN("log postsuspend failed"); } static void mirror_resume(struct dm_target *ti) { - struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dirty_log *log = ms->rh.log; + struct mirror_set *ms = ti->private; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + + atomic_set(&ms->suspend, 0); if (log->type->resume && log->type->resume(log)) /* FIXME: need better error handling */ DMWARN("log resume failed"); - rh_start_recovery(&ms->rh); + dm_rh_start_recovery(ms->rh); +} + +/* + * device_status_char + * @m: mirror device/leg we want the status of + * + * We return one character representing the most severe error + * we have encountered. 
+ * A => Alive - No failures + * D => Dead - A write failure occurred leaving mirror out-of-sync + * S => Sync - A sychronization failure occurred, mirror out-of-sync + * R => Read - A read failure occurred, mirror data unaffected + * + * Returns: <char> + */ +static char device_status_char(struct mirror *m) +{ + if (!atomic_read(&(m->error_count))) + return 'A'; + + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : + (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : + (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : + (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; } -static int mirror_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + +static void mirror_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { - unsigned int m, sz; + unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; - - sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen); + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + char buffer[ms->nr_mirrors + 1]; switch (type) { case STATUSTYPE_INFO: DMEMIT("%d ", ms->nr_mirrors); - for (m = 0; m < ms->nr_mirrors; m++) + for (m = 0; m < ms->nr_mirrors; m++) { DMEMIT("%s ", ms->mirror[m].dev->name); + buffer[m] = device_status_char(&(ms->mirror[m])); + } + buffer[m] = '\0'; + + DMEMIT("%llu/%llu 1 %s ", + (unsigned long long)log->type->get_sync_count(log), + (unsigned long long)ms->nr_regions, buffer); + + sz += log->type->status(log, type, result+sz, maxlen-sz); - DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT, - ms->rh.log->type->get_sync_count(ms->rh.log), - ms->nr_regions); break; case STATUSTYPE_TABLE: - DMEMIT("%d ", ms->nr_mirrors); + sz = log->type->status(log, type, result, maxlen); + + DMEMIT("%d", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) - DMEMIT("%s " SECTOR_FORMAT " ", - ms->mirror[m].dev->name, ms->mirror[m].offset); + DMEMIT(" %s %llu", ms->mirror[m].dev->name, + (unsigned long long)ms->mirror[m].offset); + + if (ms->features & DM_RAID1_HANDLE_ERRORS) + DMEMIT(" 1 handle_errors"); } +} - return 0; +static int mirror_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct mirror_set *ms = ti->private; + int ret = 0; + unsigned i; + + for (i = 0; !ret && i < ms->nr_mirrors; i++) + ret = fn(ti, ms->mirror[i].dev, + ms->mirror[i].offset, ti->len, data); + + return ret; } static struct target_type mirror_target = { .name = "mirror", - .version = {1, 0, 1}, + .version = {1, 13, 2}, .module = THIS_MODULE, .ctr = mirror_ctr, .dtr = mirror_dtr, .map = mirror_map, .end_io = mirror_end_io, + .presuspend = mirror_presuspend, .postsuspend = mirror_postsuspend, .resume = mirror_resume, .status = mirror_status, + .iterate_devices = mirror_iterate_devices, }; static int __init dm_mirror_init(void) { int r; - r = dm_dirty_log_init(); - if (r) - return r; - - _kmirrord_wq = create_singlethread_workqueue("kmirrord"); - if (!_kmirrord_wq) { - DMERR("couldn't start kmirrord"); - dm_dirty_log_exit(); - return r; - } - INIT_WORK(&_kmirrord_work, do_work, NULL); - r = dm_register_target(&mirror_target); if (r < 0) { - DMERR("%s: Failed to register mirror target", - mirror_target.name); - dm_dirty_log_exit(); - destroy_workqueue(_kmirrord_wq); + DMERR("Failed to register mirror target"); + goto bad_target; } + return 0; + +bad_target: return r; } static void __exit dm_mirror_exit(void) { - int r; - - r = dm_unregister_target(&mirror_target); - if (r < 0) - 
DMERR("%s: unregister failed %d", mirror_target.name, r); - - destroy_workqueue(_kmirrord_wq); - dm_dirty_log_exit(); } /* Module hooks */
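The bulk of this change replaces the driver's private region hash with the shared dm-region-hash interface (linux/dm-dirty-log.h, linux/dm-region-hash.h). As a reading aid, the sketch below strings together the dm_rh_* calls in the order the new write path uses them; it only illustrates the flow that is spread across do_writes(), do_write() and mirror_end_io() in the diff above — the example_* helper names and the simplified one-bio flow are not part of the commit.

#include <linux/device-mapper.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

/*
 * Illustrative only: account a single write against its region before it is
 * issued, and release it again from the completion path.  This mirrors the
 * split between do_writes()/do_write() and mirror_end_io() in the patch.
 */
static void example_issue_write(struct mirror_set *ms, struct bio *bio)
{
	region_t region = dm_rh_bio_to_region(ms->rh, bio);
	struct bio_list one;

	bio_list_init(&one);
	bio_list_add(&one, bio);

	switch (dm_rh_get_state(ms->rh, region, 1)) {
	case DM_RH_CLEAN:
	case DM_RH_DIRTY:
		dm_rh_inc_pending(ms->rh, &one);	/* mark region dirty */
		dm_rh_flush(ms->rh);			/* persist the dirty log */
		do_write(ms, bio);			/* mirrored dm_io write */
		break;
	case DM_RH_NOSYNC:
		dm_rh_inc_pending(ms->rh, &one);
		map_bio(get_default_mirror(ms), bio);	/* primary leg only */
		generic_make_request(bio);
		break;
	case DM_RH_RECOVERING:
		dm_rh_delay(ms->rh, bio);		/* hold until recovered */
		break;
	}
}

/* From the write completion path: the region goes clean once it is idle. */
static void example_write_done(struct mirror_set *ms, struct bio *bio,
			       region_t region)
{
	dm_rh_dec(ms->rh, region);
	bio_endio(bio, 0);
}

In the real driver the pending count is taken on whole bio lists at once and a dm_rh_flush() failure feeds ms->log_failure rather than being ignored, but the per-region lifecycle is the same: increment before issue, decrement on completion, delay while the region is recovering.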
