author     David Woodhouse <dwmw2@infradead.org>   2006-05-03 13:30:35 +0100
committer  David Woodhouse <dwmw2@infradead.org>   2006-05-03 13:30:35 +0100
commit     edc4ff7c08e9885c40e60c4fb39fa42cc91a0602
tree       f375d28043dd4457428a841167dc93d760ba9a46 /drivers/md
parent     cbb9a56177b16294ed347ba7fcb1c66c8adb5dc4
parent     e17df688f7064dae1417ce425dd1e4b71d24d63b
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/md.c       24
-rw-r--r--   drivers/md/raid1.c    29
-rw-r--r--   drivers/md/raid10.c   46
3 files changed, 60 insertions, 39 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 434ca39d19c..d7316b829a6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -279,11 +279,6 @@ static inline int mddev_lock(mddev_t * mddev)
 	return mutex_lock_interruptible(&mddev->reconfig_mutex);
 }
 
-static inline void mddev_lock_uninterruptible(mddev_t * mddev)
-{
-	mutex_lock(&mddev->reconfig_mutex);
-}
-
 static inline int mddev_trylock(mddev_t * mddev)
 {
 	return mutex_trylock(&mddev->reconfig_mutex);
@@ -2458,9 +2453,11 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
 	if (!entry->show)
 		return -EIO;
-	mddev_lock(mddev);
-	rv = entry->show(mddev, page);
-	mddev_unlock(mddev);
+	rv = mddev_lock(mddev);
+	if (!rv) {
+		rv = entry->show(mddev, page);
+		mddev_unlock(mddev);
+	}
 	return rv;
 }
 
@@ -2474,9 +2471,11 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	if (!entry->store)
 		return -EIO;
-	mddev_lock(mddev);
-	rv = entry->store(mddev, page, length);
-	mddev_unlock(mddev);
+	rv = mddev_lock(mddev);
+	if (!rv) {
+		rv = entry->store(mddev, page, length);
+		mddev_unlock(mddev);
+	}
 	return rv;
 }
 
@@ -4341,8 +4340,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
 		return 0;
 	}
 
-	if (mddev_lock(mddev)!=0)
+	if (mddev_lock(mddev) < 0)
 		return -EINTR;
+
 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
 		seq_printf(seq, "%s : %sactive", mdname(mddev),
 						mddev->pers ? "" : "in");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6081941de1b..4070eff6f0f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -315,10 +315,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 		if (r1_bio->bios[mirror] == bio)
 			break;
 
-	if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
+	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
 		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
 		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
 		r1_bio->mddev->barriers_work = 0;
+		/* Don't rdev_dec_pending in this branch - keep it for the retry */
 	} else {
 		/*
 		 * this branch is our 'one mirror IO has finished' event handler:
@@ -365,6 +366,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 				}
 			}
 		}
+		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 	}
 	/*
 	 *
@@ -374,11 +376,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
 		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
 			reschedule_retry(r1_bio);
-			/* Don't dec_pending yet, we want to hold
-			 * the reference over the retry
-			 */
 			goto out;
 		}
+		/* it really is the end of this request */
 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 			/* free extra copy of the data pages */
 			int i = bio->bi_vcnt;
@@ -393,8 +393,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 		md_write_end(r1_bio->mddev);
 		raid_end_bio_io(r1_bio);
 	}
-
-	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 out:
 	if (to_put)
 		bio_put(to_put);
@@ -753,18 +751,24 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int do_barriers;
 
-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
-		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * Register the new request and wait if the reconstruction
 	 * thread has put up a bar for new requests.
 	 * Continue immediately if no resync is active currently.
+	 * We test barriers_work *after* md_write_start as md_write_start
+	 * may cause the first superblock write, and that will check out
+	 * if barriers work.
 	 */
+
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
+	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+		if (rw == WRITE)
+			md_write_end(mddev);
+		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+		return 0;
+	}
+
 	wait_barrier(conf);
 
 	disk_stat_inc(mddev->gendisk, ios[rw]);
@@ -1404,10 +1408,11 @@ static void raid1d(mddev_t *mddev)
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
 			/* some requests in the r1bio were BIO_RW_BARRIER
-			 * requests which failed with -ENOTSUPP. Hohumm..
+			 * requests which failed with -EOPNOTSUPP. Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
 			 * all others have had their bios[] entry cleared.
+			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 617012bc107..1440935414e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1407,43 +1407,54 @@ static void raid10d(mddev_t *mddev)
 				if (s > (PAGE_SIZE>>9))
 					s = PAGE_SIZE >> 9;
 
+				rcu_read_lock();
 				do {
 					int d = r10_bio->devs[sl].devnum;
-					rdev = conf->mirrors[d].rdev;
+					rdev = rcu_dereference(conf->mirrors[d].rdev);
 					if (rdev &&
-					    test_bit(In_sync, &rdev->flags) &&
-					    sync_page_io(rdev->bdev,
-							 r10_bio->devs[sl].addr +
-							 sect + rdev->data_offset,
-							 s<<9,
-							 conf->tmppage, READ))
-						success = 1;
-					else {
-						sl++;
-						if (sl == conf->copies)
-							sl = 0;
+					    test_bit(In_sync, &rdev->flags)) {
+						atomic_inc(&rdev->nr_pending);
+						rcu_read_unlock();
+						success = sync_page_io(rdev->bdev,
+								       r10_bio->devs[sl].addr +
+								       sect + rdev->data_offset,
+								       s<<9,
+								       conf->tmppage, READ);
+						rdev_dec_pending(rdev, mddev);
+						rcu_read_lock();
+						if (success)
+							break;
 					}
+					sl++;
+					if (sl == conf->copies)
+						sl = 0;
 				} while (!success && sl != r10_bio->read_slot);
+				rcu_read_unlock();
 
 				if (success) {
 					int start = sl;
 					/* write it back and re-read */
+					rcu_read_lock();
 					while (sl != r10_bio->read_slot) {
 						int d;
 						if (sl==0)
 							sl = conf->copies;
 						sl--;
 						d = r10_bio->devs[sl].devnum;
-						rdev = conf->mirrors[d].rdev;
-						atomic_add(s, &rdev->corrected_errors);
+						rdev = rcu_dereference(conf->mirrors[d].rdev);
 						if (rdev &&
 						    test_bit(In_sync, &rdev->flags)) {
+							atomic_inc(&rdev->nr_pending);
+							rcu_read_unlock();
+							atomic_add(s, &rdev->corrected_errors);
 							if (sync_page_io(rdev->bdev,
 									 r10_bio->devs[sl].addr +
 									 sect + rdev->data_offset,
 									 s<<9, conf->tmppage, WRITE) == 0)
 								/* Well, this device is dead */
 								md_error(mddev, rdev);
+							rdev_dec_pending(rdev, mddev);
+							rcu_read_lock();
 						}
 					}
 					sl = start;
@@ -1453,17 +1464,22 @@ static void raid10d(mddev_t *mddev)
 							sl = conf->copies;
 						sl--;
 						d = r10_bio->devs[sl].devnum;
-						rdev = conf->mirrors[d].rdev;
+						rdev = rcu_dereference(conf->mirrors[d].rdev);
 						if (rdev &&
 						    test_bit(In_sync, &rdev->flags)) {
+							atomic_inc(&rdev->nr_pending);
+							rcu_read_unlock();
 							if (sync_page_io(rdev->bdev,
 									 r10_bio->devs[sl].addr +
 									 sect + rdev->data_offset,
 									 s<<9, conf->tmppage, READ) == 0)
 								/* Well, this device is dead */
 								md_error(mddev, rdev);
+							rdev_dec_pending(rdev, mddev);
+							rcu_read_lock();
 						}
 					}
+					rcu_read_unlock();
 				} else {
 					/* Cannot read from anywhere -- bye bye array */
 					md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
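The raid10.c hunks above follow a common kernel pattern: take a reference on the rdev (nr_pending) while still under rcu_read_lock(), drop the RCU read lock for the blocking sync_page_io() call, then drop the reference and re-take the lock before looking at the next copy. The sketch below is a self-contained userspace analogue of that pattern, not code from this commit; the names fake_rdev, do_sync_io and try_read_copy are invented for illustration.

/* Illustrative userspace analogue of the "pin with a refcount, drop the
 * read lock across blocking I/O" pattern used in the raid10.c hunks.
 * All names here are made up; this is not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rdev {
	atomic_int nr_pending;	/* outstanding I/O references */
	bool in_sync;		/* device holds valid data */
	int id;
};

/* Stand-in for sync_page_io(): pretend the synchronous read succeeds. */
static bool do_sync_io(struct fake_rdev *rdev)
{
	printf("reading from device %d\n", rdev->id);
	return true;
}

static bool try_read_copy(struct fake_rdev *rdev)
{
	bool ok = false;

	/* ...the RCU read lock would already be held here... */
	if (rdev && rdev->in_sync) {
		/* Pin the device before dropping the read-side lock, as the
		 * patch does with atomic_inc(&rdev->nr_pending). */
		atomic_fetch_add(&rdev->nr_pending, 1);
		/* ...rcu_read_unlock() here, because the I/O may block... */
		ok = do_sync_io(rdev);
		/* Drop the reference and re-take the read lock before the
		 * next copy is examined, mirroring rdev_dec_pending(). */
		atomic_fetch_sub(&rdev->nr_pending, 1);
		/* ...rcu_read_lock() again... */
	}
	return ok;
}

int main(void)
{
	struct fake_rdev dev = { .in_sync = true, .id = 0 };

	atomic_init(&dev.nr_pending, 0);
	return try_read_copy(&dev) ? 0 : 1;
}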