Diffstat (limited to 'drivers/md/raid0.c')
 drivers/md/raid0.c | 864 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 540 insertions(+), 324 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 818b4828409..407a99e46f6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -18,35 +18,26 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/blkdev.h>
+#include <linux/seq_file.h>
#include <linux/module.h>
-#include <linux/raid/raid0.h>
-
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
-static void raid0_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
- int i;
-
- for (i=0; i<mddev->raid_disks; i++) {
- struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
- blk_unplug(r_queue);
- }
-}
+#include <linux/slab.h>
+#include "md.h"
+#include "raid0.h"
+#include "raid5.h"
static int raid0_congested(void *data, int bits)
{
- mddev_t *mddev = data;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
+ struct mddev *mddev = data;
+ struct r0conf *conf = mddev->private;
+ struct md_rdev **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ if (mddev_congested(mddev, bits))
+ return 1;
+
+ for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -54,68 +45,110 @@ static int raid0_congested(void *data, int bits)
return ret;
}
+/*
+ * inform the user of the raid configuration
+ */
+static void dump_zones(struct mddev *mddev)
+{
+ int j, k;
+ sector_t zone_size = 0;
+ sector_t zone_start = 0;
+ char b[BDEVNAME_SIZE];
+ struct r0conf *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
+ printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
+ mdname(mddev),
+ conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ for (j = 0; j < conf->nr_strip_zones; j++) {
+ printk(KERN_INFO "md: zone%d=[", j);
+ for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
+ printk(KERN_CONT "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ + k]->bdev, b));
+ printk(KERN_CONT "]\n");
+
+ zone_size = conf->strip_zone[j].zone_end - zone_start;
+ printk(KERN_INFO " zone-offset=%10lluKB, "
+ "device-offset=%10lluKB, size=%10lluKB\n",
+ (unsigned long long)zone_start>>1,
+ (unsigned long long)conf->strip_zone[j].dev_start>>1,
+ (unsigned long long)zone_size>>1);
+ zone_start = conf->strip_zone[j].zone_end;
+ }
+ printk(KERN_INFO "\n");
+}
-static int create_strip_zones (mddev_t *mddev)
+static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
- int i, c, j;
- sector_t current_offset, curr_zone_offset;
- sector_t min_spacing;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
- struct list_head *tmp1, *tmp2;
+ int i, c, err;
+ sector_t curr_zone_end, sectors;
+ struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
struct strip_zone *zone;
int cnt;
char b[BDEVNAME_SIZE];
-
- /*
- * The number of 'same size groups'
- */
- conf->nr_strip_zones = 0;
-
- rdev_for_each(rdev1, tmp1, mddev) {
- printk("raid0: looking at %s\n",
- bdevname(rdev1->bdev,b));
+ char b2[BDEVNAME_SIZE];
+ struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ bool discard_supported = false;
+
+ if (!conf)
+ return -ENOMEM;
+ rdev_for_each(rdev1, mddev) {
+ pr_debug("md/raid0:%s: looking at %s\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev, b));
c = 0;
- rdev_for_each(rdev2, tmp2, mddev) {
- printk("raid0: comparing %s(%llu)",
- bdevname(rdev1->bdev,b),
- (unsigned long long)rdev1->size);
- printk(" with %s(%llu)\n",
- bdevname(rdev2->bdev,b),
- (unsigned long long)rdev2->size);
+
+ /* round size to chunk_size */
+ sectors = rdev1->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev1->sectors = sectors * mddev->chunk_sectors;
+
+ rdev_for_each(rdev2, mddev) {
+ pr_debug("md/raid0:%s: comparing %s(%llu)"
+ " with %s(%llu)\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev,b),
+ (unsigned long long)rdev1->sectors,
+ bdevname(rdev2->bdev,b2),
+ (unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
- printk("raid0: END\n");
+ pr_debug("md/raid0:%s: END\n",
+ mdname(mddev));
break;
}
- if (rdev2->size == rdev1->size)
- {
+ if (rdev2->sectors == rdev1->sectors) {
/*
* Not unique, don't count it as a new
* group
*/
- printk("raid0: EQUAL\n");
+ pr_debug("md/raid0:%s: EQUAL\n",
+ mdname(mddev));
c = 1;
break;
}
- printk("raid0: NOT EQUAL\n");
+ pr_debug("md/raid0:%s: NOT EQUAL\n",
+ mdname(mddev));
}
if (!c) {
- printk("raid0: ==> UNIQUE\n");
+ pr_debug("md/raid0:%s: ==> UNIQUE\n",
+ mdname(mddev));
conf->nr_strip_zones++;
- printk("raid0: %d zones\n", conf->nr_strip_zones);
+ pr_debug("md/raid0:%s: %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
}
}
- printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
-
+ pr_debug("md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
+ err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
if (!conf->strip_zone)
- return 1;
- conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
+ goto abort;
+ conf->devlist = kzalloc(sizeof(struct md_rdev*)*
conf->nr_strip_zones*mddev->raid_disks,
GFP_KERNEL);
if (!conf->devlist)
- return 1;
+ goto abort;
/* The first zone must contain all devices, so here we check that
* there is a proper alignment of slots to devices and find them all
@@ -123,226 +156,306 @@ static int create_strip_zones (mddev_t *mddev)
zone = &conf->strip_zone[0];
cnt = 0;
smallest = NULL;
- zone->dev = conf->devlist;
- rdev_for_each(rdev1, tmp1, mddev) {
+ dev = conf->devlist;
+ err = -EINVAL;
+ rdev_for_each(rdev1, mddev) {
int j = rdev1->raid_disk;
- if (j < 0 || j >= mddev->raid_disks) {
- printk("raid0: bad disk number %d - aborting!\n", j);
+ if (mddev->level == 10) {
+ /* taking over a raid10-n2 array */
+ j /= 2;
+ rdev1->new_raid_disk = j;
+ }
+
+ if (mddev->level == 1) {
+ /* taking over a raid1 array -
+ * we have only one active disk
+ */
+ j = 0;
+ rdev1->new_raid_disk = j;
+ }
+
+ if (j < 0) {
+ printk(KERN_ERR
+ "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
goto abort;
}
- if (zone->dev[j]) {
- printk("raid0: multiple devices for %d - aborting!\n",
- j);
+ if (j >= mddev->raid_disks) {
+ printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
- zone->dev[j] = rdev1;
+ if (dev[j]) {
+ printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
+ "aborting!\n", mdname(mddev), j);
+ goto abort;
+ }
+ dev[j] = rdev1;
- blk_queue_stack_limits(mddev->queue,
- rdev1->bdev->bd_disk->queue);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
- */
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
+ conf->has_merge_bvec = 1;
- if (!smallest || (rdev1->size <smallest->size))
+ if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
+
+ if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+ discard_supported = true;
}
if (cnt != mddev->raid_disks) {
- printk("raid0: too few disks (%d of %d) - aborting!\n",
- cnt, mddev->raid_disks);
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+ "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
- zone->size = smallest->size * cnt;
- zone->zone_offset = 0;
+ zone->zone_end = smallest->sectors * cnt;
- current_offset = smallest->size;
- curr_zone_offset = zone->size;
+ curr_zone_end = zone->zone_end;
/* now do the other zones */
for (i = 1; i < conf->nr_strip_zones; i++)
{
+ int j;
+
zone = conf->strip_zone + i;
- zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
+ dev = conf->devlist + i * mddev->raid_disks;
- printk("raid0: zone %d\n", i);
- zone->dev_offset = current_offset;
+ pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
+ zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
- char b[BDEVNAME_SIZE];
- rdev = conf->strip_zone[0].dev[j];
- printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
- if (rdev->size > current_offset)
- {
- printk(" contained as device %d\n", c);
- zone->dev[c] = rdev;
- c++;
- if (!smallest || (rdev->size <smallest->size)) {
- smallest = rdev;
- printk(" (%llu) is smallest!.\n",
- (unsigned long long)rdev->size);
- }
- } else
- printk(" nope.\n");
+ rdev = conf->devlist[j];
+ if (rdev->sectors <= zone->dev_start) {
+ pr_debug("md/raid0:%s: checking %s ... nope\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
+ continue;
+ }
+ pr_debug("md/raid0:%s: checking %s ..."
+ " contained as device %d\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b), c);
+ dev[c] = rdev;
+ c++;
+ if (!smallest || rdev->sectors < smallest->sectors) {
+ smallest = rdev;
+ pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
+ mdname(mddev),
+ (unsigned long long)rdev->sectors);
+ }
}
zone->nb_dev = c;
- zone->size = (smallest->size - current_offset) * c;
- printk("raid0: zone->nb_dev: %d, size: %llu\n",
- zone->nb_dev, (unsigned long long)zone->size);
+ sectors = (smallest->sectors - zone->dev_start) * c;
+ pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+ mdname(mddev),
+ zone->nb_dev, (unsigned long long)sectors);
- zone->zone_offset = curr_zone_offset;
- curr_zone_offset += zone->size;
+ curr_zone_end += sectors;
+ zone->zone_end = curr_zone_end;
- current_offset = smallest->size;
- printk("raid0: current zone offset: %llu\n",
- (unsigned long long)current_offset);
+ pr_debug("md/raid0:%s: current zone start: %llu\n",
+ mdname(mddev),
+ (unsigned long long)smallest->sectors);
}
+ mddev->queue->backing_dev_info.congested_fn = raid0_congested;
+ mddev->queue->backing_dev_info.congested_data = mddev;
- /* Now find appropriate hash spacing.
- * We want a number which causes most hash entries to cover
- * at most two strips, but the hash table must be at most
- * 1 PAGE. We choose the smallest strip, or contiguous collection
- * of strips, that has big enough size. We never consider the last
- * strip though as it's size has no bearing on the efficacy of the hash
- * table.
+ /*
+ * now since we have the hard sector sizes, we can make sure
+ * chunk size is a multiple of that sector size
*/
- conf->hash_spacing = curr_zone_offset;
- min_spacing = curr_zone_offset;
- sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
- for (i=0; i < conf->nr_strip_zones-1; i++) {
- sector_t sz = 0;
- for (j=i; j<conf->nr_strip_zones-1 &&
- sz < min_spacing ; j++)
- sz += conf->strip_zone[j].size;
- if (sz >= min_spacing && sz < conf->hash_spacing)
- conf->hash_spacing = sz;
+ if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9);
+ goto abort;
}
- mddev->queue->unplug_fn = raid0_unplug;
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
- mddev->queue->backing_dev_info.congested_fn = raid0_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
+ pr_debug("md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
- printk("raid0: done.\n");
return 0;
- abort:
- return 1;
+abort:
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+ *private_conf = ERR_PTR(err);
+ return err;
+}
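
As a worked illustration of the zone_end/dev_start bookkeeping above (hypothetical
geometry, not taken from the patch): three members whose chunk-rounded sizes are
1000, 1600 and 1900 sectors yield three zones. A minimal userspace sketch of the
same arithmetic, assuming the member sizes are distinct and sorted ascending:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical chunk-rounded member sizes, ascending */
        uint64_t sectors[] = { 1000, 1600, 1900 };
        int ndisks = 3;
        uint64_t dev_start = 0, zone_end = 0;

        for (int z = 0; z < ndisks; z++) {
            int nb_dev = ndisks - z;  /* members still taller than dev_start */
            zone_end += (sectors[z] - dev_start) * nb_dev;
            printf("zone %d: nb_dev=%d dev_start=%llu zone_end=%llu\n",
                   z, nb_dev, (unsigned long long)dev_start,
                   (unsigned long long)zone_end);
            dev_start = sectors[z];   /* next zone starts above this member */
        }
        return 0;                     /* zone_end: 3000, 4200, 4500 */
    }
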
+
+/* Find the zone which holds a particular offset
+ * Update *sectorp to be an offset in that zone
+ */
+static struct strip_zone *find_zone(struct r0conf *conf,
+ sector_t *sectorp)
+{
+ int i;
+ struct strip_zone *z = conf->strip_zone;
+ sector_t sector = *sectorp;
+
+ for (i = 0; i < conf->nr_strip_zones; i++)
+ if (sector < z[i].zone_end) {
+ if (i)
+ *sectorp = sector - z[i-1].zone_end;
+ return z + i;
+ }
+ BUG();
+}
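
Continuing the hypothetical three-zone layout from above (zone_end = 3000, 4200,
4500), the lookup is a linear scan over cumulative end offsets; a standalone
sketch, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    static const uint64_t zone_end[] = { 3000, 4200, 4500 }; /* hypothetical */

    /* Same contract as find_zone(): returns the zone index and rewrites
     * *sectorp to be the offset inside that zone. */
    static int find_zone_demo(uint64_t *sectorp)
    {
        uint64_t sector = *sectorp;
        for (int i = 0; i < 3; i++)
            if (sector < zone_end[i]) {
                if (i)
                    *sectorp = sector - zone_end[i - 1];
                return i;
            }
        return -1; /* past end of array; the kernel version BUG()s here */
    }

    int main(void)
    {
        uint64_t s = 3600;
        int z = find_zone_demo(&s);
        printf("sector 3600 -> zone %d, offset %llu\n",
               z, (unsigned long long)s); /* zone 1, offset 600 */
        return 0;
    }
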
+
+/*
+ * Remaps the bio to the target device. We separate two flows,
+ * a power-of-2 flow and a general flow, for the sake of performance.
+ */
+static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+{
+ unsigned int sect_in_chunk;
+ sector_t chunk;
+ struct r0conf *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
+ unsigned int chunk_sects = mddev->chunk_sectors;
+
+ if (is_power_of_2(chunk_sects)) {
+ int chunksect_bits = ffz(~chunk_sects);
+ /* find the sector offset inside the chunk */
+ sect_in_chunk = sector & (chunk_sects - 1);
+ sector >>= chunksect_bits;
+ /* chunk in zone */
+ chunk = *sector_offset;
+ /* quotient is the chunk in real device*/
+ sector_div(chunk, zone->nb_dev << chunksect_bits);
+ } else {
+ sect_in_chunk = sector_div(sector, chunk_sects);
+ chunk = *sector_offset;
+ sector_div(chunk, chunk_sects * zone->nb_dev);
+ }
+ /*
+ * position the bio over the real device
+ * real sector = chunk in device + starting of zone
+ * + the position in the chunk
+ */
+ *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
+ return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ + sector_div(sector, zone->nb_dev)];
}
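
The power-of-2 branch turns both divisions into shifts and masks. A userspace
sketch of that arithmetic under assumed values (128-sector chunks, a 4-disk
zone whose dev_start is 0):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const unsigned chunk_sects = 128;            /* power of two */
        const unsigned nb_dev = 4;                   /* disks in this zone */
        const int bits = __builtin_ctz(chunk_sects); /* == ffz(~chunk_sects) */

        uint64_t sector = 1000;     /* array-relative sector */
        uint64_t zone_off = sector; /* offset inside the zone (zone 0 here) */

        unsigned sect_in_chunk = sector & (chunk_sects - 1);  /* 1000 & 127 = 104 */
        unsigned dev = (sector >> bits) % nb_dev;             /* chunk 7 -> disk 3 */
        uint64_t dev_chunk = zone_off / ((uint64_t)nb_dev << bits); /* 1000/512 = 1 */
        uint64_t dev_sector = dev_chunk * chunk_sects + sect_in_chunk;

        printf("array sector %llu -> disk %u, device sector %llu\n",
               (unsigned long long)sector, dev,
               (unsigned long long)dev_sector);      /* disk 3, sector 232 */
        return 0;
    }

The caller then adds zone->dev_start and rdev->data_offset to get the final
on-disk sector, as raid0_make_request() does below.
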
/**
- * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged
+ * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
* @q: request queue
- * @bio: the buffer head that's been built up so far
+ * @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec)
{
- mddev_t *mddev = q->queuedata;
- sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+ struct mddev *mddev = q->queuedata;
+ struct r0conf *conf = mddev->private;
+ sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
+ sector_t sector_offset = sector;
int max;
- unsigned int chunk_sectors = mddev->chunk_size >> 9;
- unsigned int bio_sectors = bio->bi_size >> 9;
-
- max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
- if (max < 0) max = 0; /* bio_add cannot handle a negative return */
+ unsigned int chunk_sectors = mddev->chunk_sectors;
+ unsigned int bio_sectors = bvm->bi_size >> 9;
+ struct strip_zone *zone;
+ struct md_rdev *rdev;
+ struct request_queue *subq;
+
+ if (is_power_of_2(chunk_sectors))
+ max = (chunk_sectors - ((sector & (chunk_sectors-1))
+ + bio_sectors)) << 9;
+ else
+ max = (chunk_sectors - (sector_div(sector, chunk_sectors)
+ + bio_sectors)) << 9;
+ if (max < 0)
+ max = 0; /* bio_add cannot handle a negative return */
if (max <= biovec->bv_len && bio_sectors == 0)
return biovec->bv_len;
- else
+ if (max < biovec->bv_len)
+ /* too small already, no need to check further */
+ return max;
+ if (!conf->has_merge_bvec)
+ return max;
+
+ /* May need to check subordinate device */
+ sector = sector_offset;
+ zone = find_zone(mddev->private, &sector_offset);
+ rdev = map_sector(mddev, zone, sector, &sector_offset);
+ subq = bdev_get_queue(rdev->bdev);
+ if (subq->merge_bvec_fn) {
+ bvm->bi_bdev = rdev->bdev;
+ bvm->bi_sector = sector_offset + zone->dev_start +
+ rdev->data_offset;
+ return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
+ } else
return max;
}
-static int raid0_run (mddev_t *mddev)
+static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
- unsigned cur=0, i=0, nb_zone;
- s64 size;
- raid0_conf_t *conf;
- mdk_rdev_t *rdev;
- struct list_head *tmp;
-
- if (mddev->chunk_size == 0) {
- printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
- return -EINVAL;
- }
- printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
- mdname(mddev),
- mddev->chunk_size >> 9,
- (mddev->chunk_size>>1)-1);
- blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
- blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+ sector_t array_sectors = 0;
+ struct md_rdev *rdev;
- conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
- if (!conf)
- goto out;
- mddev->private = (void *)conf;
-
- conf->strip_zone = NULL;
- conf->devlist = NULL;
- if (create_strip_zones (mddev))
- goto out_free_conf;
+ WARN_ONCE(sectors || raid_disks,
+ "%s does not support generic reshape\n", __func__);
- /* calculate array device size */
- mddev->array_size = 0;
- rdev_for_each(rdev, tmp, mddev)
- mddev->array_size += rdev->size;
-
- printk("raid0 : md_size is %llu blocks.\n",
- (unsigned long long)mddev->array_size);
- printk("raid0 : conf->hash_spacing is %llu blocks.\n",
- (unsigned long long)conf->hash_spacing);
- {
- sector_t s = mddev->array_size;
- sector_t space = conf->hash_spacing;
- int round;
- conf->preshift = 0;
- if (sizeof(sector_t) > sizeof(u32)) {
- /*shift down space and s so that sector_div will work */
- while (space > (sector_t) (~(u32)0)) {
- s >>= 1;
- space >>= 1;
- s += 1; /* force round-up */
- conf->preshift++;
- }
- }
- round = sector_div(s, (u32)space) ? 1 : 0;
- nb_zone = s + round;
- }
- printk("raid0 : nb_zone is %d.\n", nb_zone);
-
- printk("raid0 : Allocating %Zd bytes for hash.\n",
- nb_zone*sizeof(struct strip_zone*));
- conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
- if (!conf->hash_table)
- goto out_free_conf;
- size = conf->strip_zone[cur].size;
-
- conf->hash_table[0] = conf->strip_zone + cur;
- for (i=1; i< nb_zone; i++) {
- while (size <= conf->hash_spacing) {
- cur++;
- size += conf->strip_zone[cur].size;
- }
- size -= conf->hash_spacing;
- conf->hash_table[i] = conf->strip_zone + cur;
+ rdev_for_each(rdev, mddev)
+ array_sectors += (rdev->sectors &
+ ~(sector_t)(mddev->chunk_sectors-1));
+
+ return array_sectors;
+}
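
The masking in raid0_size() rounds each member down to a whole number of
chunks; note the mask form assumes chunk_sectors is a power of two. A quick
worked check with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t chunk_sectors = 128;   /* power of two, as the mask assumes */
        const uint64_t rdev_sectors[] = { 1000000, 1000000, 1000000 };
        uint64_t array_sectors = 0;

        for (int i = 0; i < 3; i++)           /* same rounding as raid0_size() */
            array_sectors += rdev_sectors[i] & ~(chunk_sectors - 1);

        /* 1000000 & ~127 = 999936, so the array is 2999808 sectors */
        printf("%llu\n", (unsigned long long)array_sectors);
        return 0;
    }
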
+
+static int raid0_stop(struct mddev *mddev);
+
+static int raid0_run(struct mddev *mddev)
+{
+ struct r0conf *conf;
+ int ret;
+
+ if (mddev->chunk_sectors == 0) {
+ printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
+ mdname(mddev));
+ return -EINVAL;
}
- if (conf->preshift) {
- conf->hash_spacing >>= conf->preshift;
- /* round hash_spacing up so when we divide by it, we
- * err on the side of too-low, which is safest
- */
- conf->hash_spacing++;
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+ if (ret < 0)
+ return ret;
+ mddev->private = conf;
}
+ conf = mddev->private;
+
+ /* calculate array device size */
+ md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
+ printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -353,154 +466,253 @@ static int raid0_run (mddev_t *mddev)
* chunksize should be used in that case.
*/
{
- int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
+ int stripe = mddev->raid_disks *
+ (mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
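
For concreteness (assumed values, not from the patch): with 4 disks, 64K
chunks (128 sectors) and 4K pages, stripe = 4 * 65536 / 4096 = 64 pages, so
ra_pages is raised to at least 128 pages, i.e. two full 256K stripes:

    #include <stdio.h>

    int main(void)
    {
        const int raid_disks = 4;       /* hypothetical array */
        const int chunk_sectors = 128;  /* 64K chunks */
        const int page_size = 4096;

        int stripe = raid_disks * (chunk_sectors << 9) / page_size;
        printf("ra_pages >= %d pages (%dK)\n",
               2 * stripe, 2 * stripe * page_size / 1024); /* 128 pages, 512K */
        return 0;
    }
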
-
blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
- return 0;
+ dump_zones(mddev);
-out_free_conf:
- kfree(conf->strip_zone);
- kfree(conf->devlist);
- kfree(conf);
- mddev->private = NULL;
-out:
- return -ENOMEM;
+ ret = md_integrity_register(mddev);
+ if (ret)
+ raid0_stop(mddev);
+
+ return ret;
}
-static int raid0_stop (mddev_t *mddev)
+static int raid0_stop(struct mddev *mddev)
{
- raid0_conf_t *conf = mddev_to_conf(mddev);
+ struct r0conf *conf = mddev->private;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- kfree(conf->hash_table);
- conf->hash_table = NULL;
kfree(conf->strip_zone);
- conf->strip_zone = NULL;
+ kfree(conf->devlist);
kfree(conf);
mddev->private = NULL;
-
return 0;
}
-static int raid0_make_request (struct request_queue *q, struct bio *bio)
+/*
+ * Is the IO distributed over 1 or more chunks?
+ */
+static inline int is_io_in_chunk_boundary(struct mddev *mddev,
+ unsigned int chunk_sects, struct bio *bio)
+{
+ if (likely(is_power_of_2(chunk_sects))) {
+ return chunk_sects >=
+ ((bio->bi_iter.bi_sector & (chunk_sects-1))
+ + bio_sectors(bio));
+ } else {
+ sector_t sector = bio->bi_iter.bi_sector;
+ return chunk_sects >= (sector_div(sector, chunk_sects)
+ + bio_sectors(bio));
+ }
+}
+
+static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
- unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
- raid0_conf_t *conf = mddev_to_conf(mddev);
struct strip_zone *zone;
- mdk_rdev_t *tmp_dev;
- sector_t chunk;
- sector_t block, rsect;
- const int rw = bio_data_dir(bio);
+ struct md_rdev *tmp_dev;
+ struct bio *split;
- if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
+ if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ md_flush_request(mddev, bio);
+ return;
}
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
-
- chunk_size = mddev->chunk_size >> 10;
- chunk_sects = mddev->chunk_size >> 9;
- chunksize_bits = ffz(~chunk_size);
- block = bio->bi_sector >> 1;
-
-
- if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio->bi_vcnt != 1 ||
- bio->bi_idx != 0)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
- if (raid0_make_request(q, &bp->bio1))
- generic_make_request(&bp->bio1);
- if (raid0_make_request(q, &bp->bio2))
- generic_make_request(&bp->bio2);
-
- bio_pair_release(bp);
- return 0;
+ do {
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
+
+ unsigned sectors = chunk_sects -
+ (likely(is_power_of_2(chunk_sects))
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
+
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+ split->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
+}
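
The do/while loop above carves the bio at chunk boundaries before mapping each
piece. A userspace sketch of just the splitting arithmetic, with hypothetical
values (128-sector chunks, a 300-sector bio starting at sector 1000):

    #include <stdio.h>

    int main(void)
    {
        const unsigned chunk_sects = 128;  /* power-of-2 case */
        unsigned long long sector = 1000;  /* bio->bi_iter.bi_sector */
        unsigned remaining = 300;          /* bio_sectors(bio) */

        while (remaining) {
            /* sectors left in the current chunk, capped by the bio */
            unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));
            if (sectors > remaining)
                sectors = remaining;
            printf("submit %u sectors at %llu\n", sectors, sector);
            sector += sectors;
            remaining -= sectors;
        }
        return 0; /* pieces: 24 @1000, 128 @1024, 128 @1152, 20 @1280 */
    }
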
+
+static void raid0_status(struct seq_file *seq, struct mddev *mddev)
+{
+ seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
+ return;
+}
+
+static void *raid0_takeover_raid45(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ struct r0conf *priv_conf;
+
+ if (mddev->degraded != 1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
+ return ERR_PTR(-EINVAL);
}
-
- {
- sector_t x = block >> conf->preshift;
- sector_div(x, (u32)conf->hash_spacing);
- zone = conf->hash_table[x];
+ rdev_for_each(rdev, mddev) {
+ /* check slot number for a disk */
+ if (rdev->raid_disk == mddev->raid_disks-1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ rdev->sectors = mddev->dev_sectors;
}
-
- while (block >= (zone->zone_offset + zone->size))
- zone++;
-
- sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
- {
- sector_t x = (block - zone->zone_offset) >> chunksize_bits;
+static void *raid0_takeover_raid10(struct mddev *mddev)
+{
+ struct r0conf *priv_conf;
+
+ /* Check layout:
+ * - far_copies must be 1
+ * - near_copies must be 2
+ * - disks number must be even
+ * - all mirrors must be already degraded
+ */
+ if (mddev->layout != ((1 << 8) + 2)) {
+ printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->raid_disks & 1) {
+ printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->degraded != (mddev->raid_disks>>1)) {
+ printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
- sector_div(x, zone->nb_dev);
- chunk = x;
+static void *raid0_takeover_raid1(struct mddev *mddev)
+{
+ struct r0conf *priv_conf;
+ int chunksect;
- x = block >> chunksize_bits;
- tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
+ /* Check layout:
+ * - (N - 1) mirror drives must be already faulty
+ */
+ if ((mddev->raid_disks - 1) != mddev->degraded) {
+ printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
}
- rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
- + sect_in_chunk;
-
- bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = rsect + tmp_dev->data_offset;
/*
- * Let the main block layer submit the IO and resolve recursion:
+ * a raid1 doesn't have the notion of chunk size, so
+ * figure out the largest suitable size we can use.
*/
- return 1;
-
-bad_map:
- printk("raid0_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_size,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
-
- bio_io_error(bio);
- return 0;
+ chunksect = 64 * 2; /* 64K by default */
+
+ /* The array must be an exact multiple of chunksize */
+ while (chunksect && (mddev->array_sectors & (chunksect - 1)))
+ chunksect >>= 1;
+
+ if ((chunksect << 9) < PAGE_SIZE)
+ /* array size does not allow a suitable chunk size */
+ return ERR_PTR(-EINVAL);
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = chunksect;
+ mddev->chunk_sectors = chunksect;
+ mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
}
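
The chunk-size search above keeps halving from 64K until the power of two
divides the array size exactly. A worked run with an assumed raid1 size of
1000000 sectors:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long array_sectors = 1000000; /* hypothetical raid1 size */
        int chunksect = 64 * 2;                     /* start at 64K, as above */

        while (chunksect && (array_sectors & (chunksect - 1)))
            chunksect >>= 1;              /* halve until it divides evenly */

        /* 1000000 = 64 * 15625, so this settles on 64 sectors (32K) */
        printf("chunk = %dK\n", chunksect / 2);
        return 0;
    }
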
-static void raid0_status (struct seq_file *seq, mddev_t *mddev)
+static void *raid0_takeover(struct mddev *mddev)
{
-#undef MD_DEBUG
-#ifdef MD_DEBUG
- int j, k, h;
- char b[BDEVNAME_SIZE];
- raid0_conf_t *conf = mddev_to_conf(mddev);
+ /* raid0 can take over:
+ * raid4 - if all data disks are active.
+ * raid5 - provided it has a Raid4 layout and one disk is faulty
+ * raid10 - assuming we have all necessary active disks
+ * raid1 - with (N - 1) mirror drives faulty
+ */
+ if (mddev->level == 4)
+ return raid0_takeover_raid45(mddev);
- h = 0;
- for (j = 0; j < conf->nr_strip_zones; j++) {
- seq_printf(seq, " z%d", j);
- if (conf->hash_table[h] == conf->strip_zone+j)
- seq_printf(seq, "(h%d)", h++);
- seq_printf(seq, "=[");
- for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- seq_printf(seq, "%s/", bdevname(
- conf->strip_zone[j].dev[k]->bdev,b));
+ if (mddev->level == 5) {
+ if (mddev->layout == ALGORITHM_PARITY_N)
+ return raid0_takeover_raid45(mddev);
- seq_printf(seq, "] zo=%d do=%d s=%d\n",
- conf->strip_zone[j].zone_offset,
- conf->strip_zone[j].dev_offset,
- conf->strip_zone[j].size);
+ printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
}
-#endif
- seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
- return;
+
+ if (mddev->level == 10)
+ return raid0_takeover_raid10(mddev);
+
+ if (mddev->level == 1)
+ return raid0_takeover_raid1(mddev);
+
+ printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ mddev->level);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void raid0_quiesce(struct mddev *mddev, int state)
+{
}
-static struct mdk_personality raid0_personality=
+static struct md_personality raid0_personality=
{
.name = "raid0",
.level = 0,
@@ -509,6 +721,9 @@ static struct mdk_personality raid0_personality=
.run = raid0_run,
.stop = raid0_stop,
.status = raid0_status,
+ .size = raid0_size,
+ .takeover = raid0_takeover,
+ .quiesce = raid0_quiesce,
};
static int __init raid0_init (void)
@@ -524,6 +739,7 @@ static void raid0_exit (void)
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");