Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 1281
1 file changed, 1128 insertions(+), 153 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3f91c2e1dfe..987db37cb87 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -68,6 +69,11 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int enough(struct r10conf *conf, int ignore);
+static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped);
+static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
+static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape(struct r10conf *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -112,7 +118,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!r10_bio)
return NULL;
- if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */
else
nalloc = 2; /* recovery */
@@ -140,9 +147,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
- if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
- &conf->mddev->recovery)) {
- /* we can share bv_page's during recovery */
+ if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
+ &conf->mddev->recovery)) {
+ /* we can share bv_page's during recovery
+ * and reshape */
struct bio *rbio = r10_bio->devs[0].bio;
page = rbio->bi_io_vec[i].bv_page;
get_page(page);
@@ -165,10 +173,11 @@ out_free_pages:
while (j--)
for (i = 0; i < RESYNC_PAGES ; i++)
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
- j = -1;
+ j = 0;
out_free_bio:
- while (++j < nalloc) {
- bio_put(r10_bio->devs[j].bio);
+ for ( ; j < nalloc; j++) {
+ if (r10_bio->devs[j].bio)
+ bio_put(r10_bio->devs[j].bio);
if (r10_bio->devs[j].repl_bio)
bio_put(r10_bio->devs[j].repl_bio);
}
@@ -504,79 +513,96 @@ static void raid10_end_write_request(struct bio *bio, int error)
* sector offset to a virtual address
*/
-static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
int n,f;
sector_t sector;
sector_t chunk;
sector_t stripe;
int dev;
-
int slot = 0;
/* now calculate first sector/dev */
- chunk = r10bio->sector >> conf->chunk_shift;
- sector = r10bio->sector & conf->chunk_mask;
+ chunk = r10bio->sector >> geo->chunk_shift;
+ sector = r10bio->sector & geo->chunk_mask;
- chunk *= conf->near_copies;
+ chunk *= geo->near_copies;
stripe = chunk;
- dev = sector_div(stripe, conf->raid_disks);
- if (conf->far_offset)
- stripe *= conf->far_copies;
+ dev = sector_div(stripe, geo->raid_disks);
+ if (geo->far_offset)
+ stripe *= geo->far_copies;
- sector += stripe << conf->chunk_shift;
+ sector += stripe << geo->chunk_shift;
/* and calculate all the others */
- for (n=0; n < conf->near_copies; n++) {
+ for (n = 0; n < geo->near_copies; n++) {
int d = dev;
sector_t s = sector;
r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
slot++;
- for (f = 1; f < conf->far_copies; f++) {
- d += conf->near_copies;
- if (d >= conf->raid_disks)
- d -= conf->raid_disks;
- s += conf->stride;
+ for (f = 1; f < geo->far_copies; f++) {
+ d += geo->near_copies;
+ if (d >= geo->raid_disks)
+ d -= geo->raid_disks;
+ s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
slot++;
}
dev++;
- if (dev >= conf->raid_disks) {
+ if (dev >= geo->raid_disks) {
dev = 0;
- sector += (conf->chunk_mask + 1);
+ sector += (geo->chunk_mask + 1);
}
}
- BUG_ON(slot != conf->copies);
+}
+
+static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+{
+ struct geom *geo = &conf->geo;
+
+ if (conf->reshape_progress != MaxSector &&
+ ((r10bio->sector >= conf->reshape_progress) !=
+ conf->mddev->reshape_backwards)) {
+ set_bit(R10BIO_Previous, &r10bio->state);
+ geo = &conf->prev;
+ } else
+ clear_bit(R10BIO_Previous, &r10bio->state);
+
+ __raid10_find_phys(geo, r10bio);
}
static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
sector_t offset, chunk, vchunk;
+ /* Never use conf->prev as this is only called during resync
+ * or recovery, so reshape isn't happening
+ */
+ struct geom *geo = &conf->geo;
- offset = sector & conf->chunk_mask;
- if (conf->far_offset) {
+ offset = sector & geo->chunk_mask;
+ if (geo->far_offset) {
int fc;
- chunk = sector >> conf->chunk_shift;
- fc = sector_div(chunk, conf->far_copies);
- dev -= fc * conf->near_copies;
+ chunk = sector >> geo->chunk_shift;
+ fc = sector_div(chunk, geo->far_copies);
+ dev -= fc * geo->near_copies;
if (dev < 0)
- dev += conf->raid_disks;
+ dev += geo->raid_disks;
} else {
- while (sector >= conf->stride) {
- sector -= conf->stride;
- if (dev < conf->near_copies)
- dev += conf->raid_disks - conf->near_copies;
+ while (sector >= geo->stride) {
+ sector -= geo->stride;
+ if (dev < geo->near_copies)
+ dev += geo->raid_disks - geo->near_copies;
else
- dev -= conf->near_copies;
+ dev -= geo->near_copies;
}
- chunk = sector >> conf->chunk_shift;
+ chunk = sector >> geo->chunk_shift;
}
- vchunk = chunk * conf->raid_disks + dev;
- sector_div(vchunk, conf->near_copies);
- return (vchunk << conf->chunk_shift) + offset;
+ vchunk = chunk * geo->raid_disks + dev;
+ sector_div(vchunk, geo->near_copies);
+ return (vchunk << geo->chunk_shift) + offset;
}
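A side note on the arithmetic above, since it is the heart of the patch: __raid10_find_phys() maps a virtual array sector to one physical (disk, sector) pair per copy, now parameterised by a struct geom so that the old and new layouts can coexist during a reshape. A minimal standalone sketch of the near-copies case, with illustrative values (the constants and the 4-disk/near=2 layout are assumptions, not taken from the patch):

#include <stdio.h>

int main(void)
{
	const int raid_disks = 4, near_copies = 2;     /* assumed layout */
	const int chunk_shift = 7;                     /* 128-sector (64KiB) chunks */
	const unsigned long long chunk_mask = (1ULL << chunk_shift) - 1;
	unsigned long long virt = 1000000;             /* virtual array sector */

	/* same steps as __raid10_find_phys() for far_copies == 1 */
	unsigned long long chunk = virt >> chunk_shift;
	unsigned long long sector = virt & chunk_mask;
	chunk *= near_copies;
	unsigned long long stripe = chunk / raid_disks;
	int dev = chunk % raid_disks;
	sector += stripe << chunk_shift;

	for (int n = 0; n < near_copies; n++) {
		printf("copy %d -> disk %d, device sector %llu\n", n, dev, sector);
		if (++dev >= raid_disks) {
			dev = 0;
			sector += chunk_mask + 1;  /* move to the next row of chunks */
		}
	}
	return 0;  /* prints: disk 0 and disk 1, both at sector 500032 */
}

raid10_find_virt() above is the inverse of this mapping for a single copy, and as its new comment notes it only ever runs against the current geometry.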
/**
@@ -597,10 +623,17 @@ static int raid10_mergeable_bvec(struct request_queue *q,
struct r10conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
+ unsigned int chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
+ struct geom *geo = &conf->geo;
+
+ chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
+ if (conf->reshape_progress != MaxSector &&
+ ((sector >= conf->reshape_progress) !=
+ conf->mddev->reshape_backwards))
+ geo = &conf->prev;
- if (conf->near_copies < conf->raid_disks) {
+ if (geo->near_copies < geo->raid_disks) {
max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+ bio_sectors)) << 9;
if (max < 0)
@@ -614,6 +647,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
if (mddev->merge_check_needed) {
struct r10bio r10_bio;
int s;
+ if (conf->reshape_progress != MaxSector) {
+ /* Cannot give any guidance during reshape */
+ if (max <= biovec->bv_len && bio_sectors == 0)
+ return biovec->bv_len;
+ return 0;
+ }
r10_bio.sector = sector;
raid10_find_phys(conf, &r10_bio);
rcu_read_lock();
@@ -681,6 +720,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
struct md_rdev *rdev, *best_rdev;
int do_balance;
int best_slot;
+ struct geom *geo = &conf->geo;
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
@@ -761,11 +801,11 @@ retry:
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
- if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
+ if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
break;
/* for far > 1 always use the lowest address */
- if (conf->far_copies > 1)
+ if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -812,7 +852,10 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < conf->raid_disks && ret == 0; i++) {
+ for (i = 0;
+ (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
+ && ret == 0;
+ i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -973,13 +1016,24 @@ static void unfreeze_array(struct r10conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
+static sector_t choose_data_offset(struct r10bio *r10_bio,
+ struct md_rdev *rdev)
+{
+ if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
+ test_bit(R10BIO_Previous, &r10_bio->state))
+ return rdev->data_offset;
+ else
+ return rdev->new_data_offset;
+}
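choose_data_offset() is the core of the new data-offset handling: during a reshape each rdev carries two offsets, and which one applies depends on whether the r10bio was mapped with the old or the new geometry. The rule, read straight off the code above:

	reshape running?   R10BIO_Previous (old geometry)?   offset used
	       no                     n/a                    data_offset
	       yes                    yes                    data_offset
	       yes                    no                     new_data_offset

The rdev->data_offset users later in this patch are converted to call it.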
+
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- int chunk_sects = conf->chunk_mask + 1;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -988,6 +1042,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int plugged;
int sectors_handled;
int max_sectors;
+ int sectors;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
@@ -997,9 +1052,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/* If this request crosses a chunk boundary, we need to
* split it. This will only happen for 1 PAGE (or less) requests.
*/
- if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
- > chunk_sects &&
- conf->near_copies < conf->raid_disks)) {
+ if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+ > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies < conf->prev.raid_disks))) {
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
if (bio->bi_vcnt != 1 ||
@@ -1051,10 +1107,41 @@ static void make_request(struct mddev *mddev, struct bio * bio)
*/
wait_barrier(conf);
+ sectors = bio->bi_size >> 9;
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_sector < conf->reshape_progress &&
+ bio->bi_sector + sectors > conf->reshape_progress) {
+ /* IO spans the reshape position. Need to wait for
+ * reshape to pass
+ */
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_sector ||
+ conf->reshape_progress >= bio->bi_sector + sectors);
+ wait_barrier(conf);
+ }
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio_data_dir(bio) == WRITE &&
+ (mddev->reshape_backwards
+ ? (bio->bi_sector < conf->reshape_safe &&
+ bio->bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_sector < conf->reshape_progress))) {
+ /* Need to update reshape_position in metadata */
+ mddev->reshape_position = conf->reshape_progress;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+
+ conf->reshape_safe = mddev->reshape_position;
+ }
+
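These two wait loops implement the reshape window for normal IO, roughly like this for the forward case (reshape_backwards == 0; the backward case is mirrored):

	 new layout,           new layout, copied but    old layout
	 recorded in metadata  not yet recorded
	-----------------------+-----------------------+--------------> sector
	                 reshape_safe          reshape_progress

Any request spanning reshape_progress blocks until the boundary moves past it, and a write landing between reshape_safe and reshape_progress first forces reshape_position out to the superblock, so a crash cannot replay the reshape over data already written in the new layout.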
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = bio->bi_size >> 9;
+ r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector;
@@ -1093,7 +1180,7 @@ read_again:
r10_bio->devs[slot].rdev = rdev;
read_bio->bi_sector = r10_bio->devs[slot].addr +
- rdev->data_offset;
+ choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1297,7 +1384,8 @@ retry_write:
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].rdev->data_offset);
+ choose_data_offset(r10_bio,
+ conf->mirrors[d].rdev));
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -1321,8 +1409,10 @@ retry_write:
* so it cannot disappear, so the replacement cannot
* become NULL here
*/
- mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].replacement->data_offset);
+ mbio->bi_sector = (r10_bio->devs[i].addr +
+ choose_data_offset(
+ r10_bio,
+ conf->mirrors[d].replacement));
mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -1368,19 +1458,19 @@ static void status(struct seq_file *seq, struct mddev *mddev)
struct r10conf *conf = mddev->private;
int i;
- if (conf->near_copies < conf->raid_disks)
+ if (conf->geo.near_copies < conf->geo.raid_disks)
seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
- if (conf->near_copies > 1)
- seq_printf(seq, " %d near-copies", conf->near_copies);
- if (conf->far_copies > 1) {
- if (conf->far_offset)
- seq_printf(seq, " %d offset-copies", conf->far_copies);
+ if (conf->geo.near_copies > 1)
+ seq_printf(seq, " %d near-copies", conf->geo.near_copies);
+ if (conf->geo.far_copies > 1) {
+ if (conf->geo.far_offset)
+ seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
else
- seq_printf(seq, " %d far-copies", conf->far_copies);
+ seq_printf(seq, " %d far-copies", conf->geo.far_copies);
}
- seq_printf(seq, " [%d/%d] [", conf->raid_disks,
- conf->raid_disks - mddev->degraded);
- for (i = 0; i < conf->raid_disks; i++)
+ seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
+ conf->geo.raid_disks - mddev->degraded);
+ for (i = 0; i < conf->geo.raid_disks; i++)
seq_printf(seq, "%s",
conf->mirrors[i].rdev &&
test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
@@ -1392,7 +1482,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
* Don't consider the device numbered 'ignore'
* as we might be about to remove it.
*/
-static int enough(struct r10conf *conf, int ignore)
+static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
{
int first = 0;
@@ -1403,7 +1493,7 @@ static int enough(struct r10conf *conf, int ignore)
if (conf->mirrors[first].rdev &&
first != ignore)
cnt++;
- first = (first+1) % conf->raid_disks;
+ first = (first+1) % geo->raid_disks;
}
if (cnt == 0)
return 0;
@@ -1411,6 +1501,12 @@ static int enough(struct r10conf *conf, int ignore)
return 1;
}
+static int enough(struct r10conf *conf, int ignore)
+{
+ return _enough(conf, &conf->geo, ignore) &&
+ _enough(conf, &conf->prev, ignore);
+}
+
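enough() now has to hold for both geometries: _enough() walks each set of mirrored copies (the loop body is split across the hunks above) and fails if any set has no working device, and during a reshape the data is partly in the old layout and partly in the new, so losing redundancy in either one loses data.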
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
@@ -1445,7 +1541,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
"md/raid10:%s: Disk failure on %s, disabling device.\n"
"md/raid10:%s: Operation continuing on %d devices.\n",
mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1458,10 +1554,10 @@ static void print_conf(struct r10conf *conf)
printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
@@ -1493,7 +1589,7 @@ static int raid10_spare_active(struct mddev *mddev)
* Find all non-in_sync disks within the RAID10 configuration
* and mark them in_sync
*/
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
tmp = conf->mirrors + i;
if (tmp->replacement
&& tmp->replacement->recovery_offset == MaxSector
@@ -1535,7 +1631,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = -EEXIST;
int mirror;
int first = 0;
- int last = conf->raid_disks - 1;
+ int last = conf->geo.raid_disks - 1;
struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_cp < MaxSector)
@@ -1543,7 +1639,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* very different from resync
*/
return -EBUSY;
- if (rdev->saved_raid_disk < 0 && !enough(conf, -1))
+ if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
return -EINVAL;
if (rdev->raid_disk >= 0)
@@ -1635,6 +1731,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (!test_bit(Faulty, &rdev->flags) &&
mddev->recovery_disabled != p->recovery_disabled &&
(!p->replacement || p->replacement == rdev) &&
+ number < conf->geo.raid_disks &&
enough(conf, -1)) {
err = -EBUSY;
goto abort;
@@ -1676,7 +1773,11 @@ static void end_sync_read(struct bio *bio, int error)
struct r10conf *conf = r10_bio->mddev->private;
int d;
- d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
+ if (bio == r10_bio->master_bio) {
+ /* this is a reshape read */
+ d = r10_bio->read_slot; /* really the read dev */
+ } else
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -2218,7 +2319,9 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
@@ -2256,7 +2359,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
@@ -2269,7 +2373,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
atomic_add(s, &rdev->corrected_errors);
}
@@ -2343,7 +2448,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
md_trim_bio(wbio, sector - bio->bi_sector, sectors);
wbio->bi_sector = (r10_bio->devs[i].addr+
- rdev->data_offset+
+ choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
@@ -2420,7 +2525,7 @@ read_more:
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
- + rdev->data_offset;
+ + choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
@@ -2480,7 +2585,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
} else {
if (!rdev_set_badblocks(
rdev,
@@ -2496,7 +2601,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
} else {
if (!rdev_set_badblocks(
rdev,
@@ -2515,7 +2620,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL &&
!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
@@ -2532,7 +2637,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
}
}
@@ -2573,6 +2678,8 @@ static void raid10d(struct mddev *mddev)
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
test_bit(R10BIO_WriteError, &r10_bio->state))
handle_write_completed(conf, r10_bio);
+ else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
+ reshape_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
@@ -2603,7 +2710,7 @@ static int init_resync(struct r10conf *conf)
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
BUG_ON(conf->r10buf_pool);
conf->have_replacement = 0;
- for (i = 0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->geo.raid_disks; i++)
if (conf->mirrors[i].replacement)
conf->have_replacement = 1;
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
@@ -2657,6 +2764,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sync_blocks;
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
+ sector_t chunk_mask = conf->geo.chunk_mask;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -2664,7 +2772,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
skipped:
max_sector = mddev->dev_sectors;
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
/* If we aborted, we need to abort the
@@ -2676,11 +2785,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
* we need to convert that to several
* virtual addresses.
*/
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+ end_reshape(conf);
+ return 0;
+ }
+
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
- else for (i=0; i<conf->raid_disks; i++) {
+ else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
bitmap_end_sync(mddev->bitmap, sect,
@@ -2694,7 +2808,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Completed a full sync so the replacements
* are now fully recovered.
*/
- for (i = 0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->geo.raid_disks; i++)
if (conf->mirrors[i].replacement)
conf->mirrors[i].replacement
->recovery_offset
@@ -2707,7 +2821,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
*skipped = 1;
return sectors_skipped;
}
- if (chunks_skipped >= conf->raid_disks) {
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ return reshape_request(mddev, sector_nr, skipped);
+
+ if (chunks_skipped >= conf->geo.raid_disks) {
/* if there has been nothing to do on any drive,
* then there is nothing to do at all..
*/
@@ -2721,9 +2839,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* make sure whole request will fit in a chunk - if chunks
* are meaningful
*/
- if (conf->near_copies < conf->raid_disks &&
- max_sector > (sector_nr | conf->chunk_mask))
- max_sector = (sector_nr | conf->chunk_mask) + 1;
+ if (conf->geo.near_copies < conf->geo.raid_disks &&
+ max_sector > (sector_nr | chunk_mask))
+ max_sector = (sector_nr | chunk_mask) + 1;
/*
* If there is non-resync activity waiting for us then
* put in a delay to throttle resync.
@@ -2752,7 +2870,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
int j;
r10_bio = NULL;
- for (i=0 ; i<conf->raid_disks; i++) {
+ for (i = 0 ; i < conf->geo.raid_disks; i++) {
int still_degraded;
struct r10bio *rb2;
sector_t sect;
@@ -2806,7 +2924,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to check if the array will still be
* degraded
*/
- for (j=0; j<conf->raid_disks; j++)
+ for (j = 0; j < conf->geo.raid_disks; j++)
if (conf->mirrors[j].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
still_degraded = 1;
@@ -2984,9 +3102,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sector = sector_nr;
set_bit(R10BIO_IsSync, &r10_bio->state);
raid10_find_phys(conf, r10_bio);
- r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
+ r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
- for (i=0; i<conf->copies; i++) {
+ for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
sector_t first_bad, sector;
int bad_sectors;
@@ -3152,16 +3270,17 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
struct r10conf *conf = mddev->private;
if (!raid_disks)
- raid_disks = conf->raid_disks;
+ raid_disks = min(conf->geo.raid_disks,
+ conf->prev.raid_disks);
if (!sectors)
sectors = conf->dev_sectors;
- size = sectors >> conf->chunk_shift;
- sector_div(size, conf->far_copies);
+ size = sectors >> conf->geo.chunk_shift;
+ sector_div(size, conf->geo.far_copies);
size = size * raid_disks;
- sector_div(size, conf->near_copies);
+ sector_div(size, conf->geo.near_copies);
- return size << conf->chunk_shift;
+ return size << conf->geo.chunk_shift;
}
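To sanity-check the size arithmetic with illustrative numbers (a hypothetical 4 x 1 TiB near=2, far=1 array; none of these values appear in the patch): dev_sectors = 2^31, chunk_shift = 7, so

	size = 2^31 >> 7              = 2^24  chunks per device
	size / far_copies (1)         = 2^24
	size * raid_disks (4)         = 2^26
	size / near_copies (2)        = 2^25  chunks of array capacity
	size << 7                     = 2^32  sectors = 2 TiB

which is the expected half of the raw 4 TiB for two-way mirroring.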
static void calc_sectors(struct r10conf *conf, sector_t size)
@@ -3171,10 +3290,10 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
* conf->stride
*/
- size = size >> conf->chunk_shift;
- sector_div(size, conf->far_copies);
- size = size * conf->raid_disks;
- sector_div(size, conf->near_copies);
+ size = size >> conf->geo.chunk_shift;
+ sector_div(size, conf->geo.far_copies);
+ size = size * conf->geo.raid_disks;
+ sector_div(size, conf->geo.near_copies);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" */
size = size * conf->copies;
@@ -3182,38 +3301,76 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
- size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+ size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
- conf->dev_sectors = size << conf->chunk_shift;
+ conf->dev_sectors = size << conf->geo.chunk_shift;
- if (conf->far_offset)
- conf->stride = 1 << conf->chunk_shift;
+ if (conf->geo.far_offset)
+ conf->geo.stride = 1 << conf->geo.chunk_shift;
else {
- sector_div(size, conf->far_copies);
- conf->stride = size << conf->chunk_shift;
+ sector_div(size, conf->geo.far_copies);
+ conf->geo.stride = size << conf->geo.chunk_shift;
}
}
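calc_sectors() runs the same arithmetic in the other direction: for the hypothetical array above, 2^25 array chunks * copies (2) / raid_disks (4) = 2^24 used chunks per device, so dev_sectors = 2^31; and with far_copies = 1 and no far_offset, stride = dev_sectors / far_copies = 2^31.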
+enum geo_type {geo_new, geo_old, geo_start};
+static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
+{
+ int nc, fc, fo;
+ int layout, chunk, disks;
+ switch (new) {
+ case geo_old:
+ layout = mddev->layout;
+ chunk = mddev->chunk_sectors;
+ disks = mddev->raid_disks - mddev->delta_disks;
+ break;
+ case geo_new:
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks;
+ break;
+ default: /* avoid 'may be unused' warnings */
+ case geo_start: /* new when starting reshape - raid_disks not
+ * updated yet. */
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks + mddev->delta_disks;
+ break;
+ }
+ if (layout >> 17)
+ return -1;
+ if (chunk < (PAGE_SIZE >> 9) ||
+ !is_power_of_2(chunk))
+ return -2;
+ nc = layout & 255;
+ fc = (layout >> 8) & 255;
+ fo = layout & (1<<16);
+ geo->raid_disks = disks;
+ geo->near_copies = nc;
+ geo->far_copies = fc;
+ geo->far_offset = fo;
+ geo->chunk_mask = chunk - 1;
+ geo->chunk_shift = ffz(~chunk);
+ return nc*fc;
+}
+
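setup_geo() centralises the layout decoding that previously lived inline in setup_conf(). As a worked example, the 'near=2' layout that mdadm creates by default is layout word 0x102 (values plugged into the masks above; the example itself is not from the patch):

	nc = 0x102 & 255        = 2   near copies
	fc = (0x102 >> 8) & 255 = 1   far copy (i.e. no extra far copies)
	fo = 0x102 & (1 << 16)  = 0   not far-offset
	return value            = nc * fc = 2 copies

and any layout with bits above bit 16 set (layout >> 17) is rejected as unknown.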
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
- int nc, fc, fo;
int err = -EINVAL;
+ struct geom geo;
+ int copies;
+
+ copies = setup_geo(&geo, mddev, geo_new);
- if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
- !is_power_of_2(mddev->new_chunk_sectors)) {
+ if (copies == -2) {
printk(KERN_ERR "md/raid10:%s: chunk size must be "
"at least PAGE_SIZE(%ld) and be a power of 2.\n",
mdname(mddev), PAGE_SIZE);
goto out;
}
- nc = mddev->new_layout & 255;
- fc = (mddev->new_layout >> 8) & 255;
- fo = mddev->new_layout & (1<<16);
-
- if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
- (mddev->new_layout >> 17)) {
+ if (copies < 2 || copies > mddev->raid_disks) {
printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->new_layout);
goto out;
@@ -3224,7 +3381,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf)
goto out;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ /* FIXME calc properly */
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
+ max(0,mddev->delta_disks)),
GFP_KERNEL);
if (!conf->mirrors)
goto out;
@@ -3233,22 +3392,29 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto out;
-
- conf->raid_disks = mddev->raid_disks;
- conf->near_copies = nc;
- conf->far_copies = fc;
- conf->copies = nc*fc;
- conf->far_offset = fo;
- conf->chunk_mask = mddev->new_chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
-
+ conf->geo = geo;
+ conf->copies = copies;
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
r10bio_pool_free, conf);
if (!conf->r10bio_pool)
goto out;
calc_sectors(conf, mddev->dev_sectors);
-
+ if (mddev->reshape_position == MaxSector) {
+ conf->prev = conf->geo;
+ conf->reshape_progress = MaxSector;
+ } else {
+ if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
+ err = -EINVAL;
+ goto out;
+ }
+ conf->reshape_progress = mddev->reshape_position;
+ if (conf->prev.far_offset)
+ conf->prev.stride = 1 << conf->prev.chunk_shift;
+ else
+ /* far_copies must be 1 */
+ conf->prev.stride = conf->dev_sectors;
+ }
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
@@ -3263,8 +3429,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
+ if (err == -ENOMEM)
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
if (conf) {
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
@@ -3282,12 +3449,8 @@ static int run(struct mddev *mddev)
struct mirror_info *disk;
struct md_rdev *rdev;
sector_t size;
-
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+ sector_t min_offset_diff = 0;
+ int first = 1;
if (mddev->private == NULL) {
conf = setup_conf(mddev);
@@ -3304,17 +3467,20 @@ static int run(struct mddev *mddev)
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->raid_disks % conf->near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
else
blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks / conf->near_copies));
+ (conf->geo.raid_disks / conf->geo.near_copies));
rdev_for_each(rdev, mddev) {
+ long long diff;
disk_idx = rdev->raid_disk;
- if (disk_idx >= conf->raid_disks
- || disk_idx < 0)
+ if (disk_idx < 0)
+ continue;
+ if (disk_idx >= conf->geo.raid_disks &&
+ disk_idx >= conf->prev.raid_disks)
continue;
disk = conf->mirrors + disk_idx;
@@ -3327,12 +3493,20 @@ static int run(struct mddev *mddev)
goto out_free_conf;
disk->rdev = rdev;
}
+ diff = (rdev->new_data_offset - rdev->data_offset);
+ if (!mddev->reshape_backwards)
+ diff = -diff;
+ if (diff < 0)
+ diff = 0;
+ if (first || diff < min_offset_diff)
+ min_offset_diff = diff;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
disk->head_position = 0;
}
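min_offset_diff here is the smallest old-vs-new data_offset gap across all members, clamped to the direction of the reshape; it measures how much room each device has between the two layouts. The hunks shown only compute it; the rest of the patch (not visible in this truncated view) uses it to bound how far the reshape may proceed before the metadata must be updated.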
+
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
@@ -3340,8 +3514,21 @@ static int run(struct mddev *mddev)
goto out_free_conf;
}
+ if (conf->reshape_progress != MaxSector) {
+ /* must ensure that shape change is supported */
+ if (conf->geo.far_copies != 1 &&
+ conf->geo.far_offset == 0)
+ goto out_free_conf;
+ if (conf->prev.far_copies != 1 &&
+ conf->prev.far_offset == 0)
+ goto out_free_conf;
+ }
+
mddev->degraded = 0;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0;
+ i < conf->geo.raid_disks
+ || i < conf->prev.raid_disks;
+ i++) {
disk = conf->mirrors + i;
@@ -3368,8 +3555,8 @@ static int run(struct mddev *mddev)
mdname(mddev));
printk(KERN_INFO
"md/raid10:%s: active with %d out of %d devices\n",
- mdname(mddev), conf->raid_disks - mddev->degraded,
- conf->raid_disks);
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded,
+ conf->geo.raid_disks);
/*
* Ok, everything is just fine now
*/
@@ -3386,11 +3573,11 @@ static int run(struct mddev *mddev)
* maybe...
*/
{
- int stripe = conf->raid_disks *
+ int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- stripe /= conf->near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ stripe /= conf->geo.near_copies;
+ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;