Diffstat (limited to 'drivers/md')
32 files changed, 3351 insertions, 1199 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 2158377a135..acb3a4e404f 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -185,11 +185,10 @@ config MD_MULTIPATH tristate "Multipath I/O support" depends on BLK_DEV_MD help - Multipath-IO is the ability of certain devices to address the same - physical disk over multiple 'IO paths'. The code ensures that such - paths can be defined and handled at runtime, and ensures that a - transparent failover to the backup path(s) happens if a IO errors - arrives on the primary path. + MD_MULTIPATH provides a simple multi-path personality for use + the MD framework. It is not under active development. New + projects should consider using DM_MULTIPATH which has more + features and more testing. If unsure, say N. diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 60e2b322db1..26ac8aad0b1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -212,7 +212,7 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) */ /* IO operations when bitmap is stored near all superblocks */ -static struct page *read_sb_page(mddev_t *mddev, long offset, +static struct page *read_sb_page(mddev_t *mddev, loff_t offset, struct page *page, unsigned long index, int size) { @@ -287,27 +287,36 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; + loff_t offset = mddev->bitmap_info.offset; if (page->index == bitmap->file_pages-1) size = roundup(bitmap->last_page_size, bdev_logical_block_size(rdev->bdev)); /* Just make sure we aren't corrupting data or * metadata */ - if (bitmap->offset < 0) { + if (mddev->external) { + /* Bitmap could be anywhere. */ + if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) > + rdev->data_offset && + rdev->sb_start + offset < + rdev->data_offset + mddev->dev_sectors + + (PAGE_SIZE/512)) + goto bad_alignment; + } else if (offset < 0) { /* DATA BITMAP METADATA */ - if (bitmap->offset + if (offset + (long)(page->index * (PAGE_SIZE/512)) + size/512 > 0) /* bitmap runs in to metadata */ goto bad_alignment; if (rdev->data_offset + mddev->dev_sectors - > rdev->sb_start + bitmap->offset) + > rdev->sb_start + offset) /* data runs in to bitmap */ goto bad_alignment; } else if (rdev->sb_start < rdev->data_offset) { /* METADATA BITMAP DATA */ if (rdev->sb_start - + bitmap->offset + + offset + page->index*(PAGE_SIZE/512) + size/512 > rdev->data_offset) /* bitmap runs in to data */ @@ -316,7 +325,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) /* DATA METADATA BITMAP - no problems */ } md_super_write(mddev, rdev, - rdev->sb_start + bitmap->offset + rdev->sb_start + offset + page->index * (PAGE_SIZE/512), size, page); @@ -488,6 +497,8 @@ void bitmap_update_sb(struct bitmap *bitmap) if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ return; + if (bitmap->mddev->bitmap_info.external) + return; spin_lock_irqsave(&bitmap->lock, flags); if (!bitmap->sb_page) { /* no superblock */ spin_unlock_irqrestore(&bitmap->lock, flags); @@ -501,6 +512,9 @@ void bitmap_update_sb(struct bitmap *bitmap) bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->events_cleared); } + /* Just in case these have been changed via sysfs: */ + sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); + sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); kunmap_atomic(sb, KM_USER0); write_page(bitmap, 
bitmap->sb_page, 1); } @@ -550,7 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap) bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); } else { - bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, + bitmap->sb_page = read_sb_page(bitmap->mddev, + bitmap->mddev->bitmap_info.offset, NULL, 0, sizeof(bitmap_super_t)); } @@ -563,7 +578,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); chunksize = le32_to_cpu(sb->chunksize); - daemon_sleep = le32_to_cpu(sb->daemon_sleep); + daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); /* verify that the bitmap-specific fields are valid */ @@ -576,7 +591,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) reason = "bitmap chunksize too small"; else if ((1 << ffz(~chunksize)) != chunksize) reason = "bitmap chunksize not a power of 2"; - else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) + else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; @@ -610,10 +625,9 @@ static int bitmap_read_sb(struct bitmap *bitmap) } success: /* assign fields using values from superblock */ - bitmap->chunksize = chunksize; - bitmap->daemon_sleep = daemon_sleep; - bitmap->daemon_lastrun = jiffies; - bitmap->max_write_behind = write_behind; + bitmap->mddev->bitmap_info.chunksize = chunksize; + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; + bitmap->mddev->bitmap_info.max_write_behind = write_behind; bitmap->flags |= le32_to_cpu(sb->state); if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) bitmap->flags |= BITMAP_HOSTENDIAN; @@ -664,16 +678,26 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, * general bitmap file operations */ +/* + * on-disk bitmap: + * + * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap + * file a page at a time. There's a superblock at the start of the file. 
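The new file_page_index()/file_page_offset() helpers below only fold the superblock offset in for internal bitmaps. A rough userspace sketch of that arithmetic, assuming 4 KiB pages and the 256-byte bitmap superblock asserted later in this patch (the helper names here are illustrative, not kernel code):

#include <stdio.h>

#define PAGE_BITS (4096 * 8)   /* bits per 4 KiB page (assumed) */
#define SB_BITS   (256 * 8)    /* sizeof(bitmap_super_t) << 3   */

/* Illustrative stand-ins for file_page_index()/file_page_offset():
 * an internal bitmap skips the superblock bits, an external one
 * starts its chunk bits at bit 0 of page 0.
 */
static unsigned long page_index(unsigned long chunk, int external)
{
	if (!external)
		chunk += SB_BITS;
	return chunk / PAGE_BITS;
}

static unsigned long page_bit(unsigned long chunk, int external)
{
	if (!external)
		chunk += SB_BITS;
	return chunk % PAGE_BITS;
}

int main(void)
{
	unsigned long chunk = 40000;

	printf("internal: page %lu bit %lu\n",
	       page_index(chunk, 0), page_bit(chunk, 0));
	printf("external: page %lu bit %lu\n",
	       page_index(chunk, 1), page_bit(chunk, 1));
	return 0;
}

So an external bitmap uses the whole first page for chunk bits, while an internal one skips the first 2048 bits that the superblock occupies.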
+ */ /* calculate the index of the page that contains this bit */ -static inline unsigned long file_page_index(unsigned long chunk) +static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk) { - return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT; + if (!bitmap->mddev->bitmap_info.external) + chunk += sizeof(bitmap_super_t) << 3; + return chunk >> PAGE_BIT_SHIFT; } /* calculate the (bit) offset of this bit within a page */ -static inline unsigned long file_page_offset(unsigned long chunk) +static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk) { - return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1); + if (!bitmap->mddev->bitmap_info.external) + chunk += sizeof(bitmap_super_t) << 3; + return chunk & (PAGE_BITS - 1); } /* @@ -686,8 +710,9 @@ static inline unsigned long file_page_offset(unsigned long chunk) static inline struct page *filemap_get_page(struct bitmap *bitmap, unsigned long chunk) { - if (file_page_index(chunk) >= bitmap->file_pages) return NULL; - return bitmap->filemap[file_page_index(chunk) - file_page_index(0)]; + if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL; + return bitmap->filemap[file_page_index(bitmap, chunk) + - file_page_index(bitmap, 0)]; } @@ -710,7 +735,7 @@ static void bitmap_file_unmap(struct bitmap *bitmap) spin_unlock_irqrestore(&bitmap->lock, flags); while (pages--) - if (map[pages]->index != 0) /* 0 is sb_page, release it below */ + if (map[pages] != sb_page) /* 0 is sb_page, release it below */ free_buffers(map[pages]); kfree(map); kfree(attr); @@ -821,7 +846,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) page = filemap_get_page(bitmap, chunk); if (!page) return; - bit = file_page_offset(chunk); + bit = file_page_offset(bitmap, chunk); /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); @@ -907,7 +932,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) chunks = bitmap->chunks; file = bitmap->file; - BUG_ON(!file && !bitmap->offset); + BUG_ON(!file && !bitmap->mddev->bitmap_info.offset); #ifdef INJECT_FAULTS_3 outofdate = 1; @@ -919,14 +944,17 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) "recovery\n", bmname(bitmap)); bytes = (chunks + 7) / 8; + if (!bitmap->mddev->bitmap_info.external) + bytes += sizeof(bitmap_super_t); - num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE; + + num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE; - if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) { + if (file && i_size_read(file->f_mapping->host) < bytes) { printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", bmname(bitmap), (unsigned long) i_size_read(file->f_mapping->host), - bytes + sizeof(bitmap_super_t)); + bytes); goto err; } @@ -947,17 +975,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) for (i = 0; i < chunks; i++) { int b; - index = file_page_index(i); - bit = file_page_offset(i); + index = file_page_index(bitmap, i); + bit = file_page_offset(bitmap, i); if (index != oldindex) { /* this is a new page, read it in */ int count; /* unmap the old page, we're done with it */ if (index == num_pages-1) - count = bytes + sizeof(bitmap_super_t) - - index * PAGE_SIZE; + count = bytes - index * PAGE_SIZE; else count = PAGE_SIZE; - if (index == 0) { + if (index == 0 && bitmap->sb_page) { /* * if we're here then the superblock page * contains some bits (PAGE_SIZE != sizeof sb) @@ -967,14 +994,15 @@ static int 
bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) offset = sizeof(bitmap_super_t); if (!file) read_sb_page(bitmap->mddev, - bitmap->offset, + bitmap->mddev->bitmap_info.offset, page, index, count); } else if (file) { page = read_page(file, index, bitmap, count); offset = 0; } else { - page = read_sb_page(bitmap->mddev, bitmap->offset, + page = read_sb_page(bitmap->mddev, + bitmap->mddev->bitmap_info.offset, NULL, index, count); offset = 0; @@ -1078,23 +1106,32 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, * out to disk */ -void bitmap_daemon_work(struct bitmap *bitmap) +void bitmap_daemon_work(mddev_t *mddev) { + struct bitmap *bitmap; unsigned long j; unsigned long flags; struct page *page = NULL, *lastpage = NULL; int blocks; void *paddr; - if (bitmap == NULL) + /* Use a mutex to guard daemon_work against + * bitmap_destroy. + */ + mutex_lock(&mddev->bitmap_info.mutex); + bitmap = mddev->bitmap; + if (bitmap == NULL) { + mutex_unlock(&mddev->bitmap_info.mutex); return; - if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) + } + if (time_before(jiffies, bitmap->daemon_lastrun + + bitmap->mddev->bitmap_info.daemon_sleep)) goto done; bitmap->daemon_lastrun = jiffies; if (bitmap->allclean) { bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - return; + goto done; } bitmap->allclean = 1; @@ -1142,7 +1179,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) /* We are possibly going to clear some bits, so make * sure that events_cleared is up-to-date. */ - if (bitmap->need_sync) { + if (bitmap->need_sync && + bitmap->mddev->bitmap_info.external == 0) { bitmap_super_t *sb; bitmap->need_sync = 0; sb = kmap_atomic(bitmap->sb_page, KM_USER0); @@ -1152,7 +1190,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) write_page(bitmap, bitmap->sb_page, 1); } spin_lock_irqsave(&bitmap->lock, flags); - clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); + if (!bitmap->need_sync) + clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); } bmc = bitmap_get_counter(bitmap, (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), @@ -1167,7 +1206,7 @@ void bitmap_daemon_work(struct bitmap *bitmap) if (*bmc == 2) { *bmc=1; /* maybe clear the bit next time */ set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); - } else if (*bmc == 1) { + } else if (*bmc == 1 && !bitmap->need_sync) { /* we can clear the bit */ *bmc = 0; bitmap_count_page(bitmap, @@ -1177,9 +1216,11 @@ void bitmap_daemon_work(struct bitmap *bitmap) /* clear the bit */ paddr = kmap_atomic(page, KM_USER0); if (bitmap->flags & BITMAP_HOSTENDIAN) - clear_bit(file_page_offset(j), paddr); + clear_bit(file_page_offset(bitmap, j), + paddr); else - ext2_clear_bit(file_page_offset(j), paddr); + ext2_clear_bit(file_page_offset(bitmap, j), + paddr); kunmap_atomic(paddr, KM_USER0); } } else @@ -1202,7 +1243,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) done: if (bitmap->allclean == 0) - bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; + bitmap->mddev->thread->timeout = + bitmap->mddev->bitmap_info.daemon_sleep; + mutex_unlock(&mddev->bitmap_info.mutex); } static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, @@ -1332,6 +1375,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; + sysfs_notify_dirent(bitmap->sysfs_can_clear); } if (!success && ! 
(*bmc & NEEDED_MASK)) @@ -1470,7 +1514,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) return; } if (time_before(jiffies, (bitmap->last_end_sync - + bitmap->daemon_sleep * HZ))) + + bitmap->mddev->bitmap_info.daemon_sleep))) return; wait_event(bitmap->mddev->recovery_wait, atomic_read(&bitmap->mddev->recovery_active) == 0); @@ -1522,6 +1566,12 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); bitmap_set_memory_bits(bitmap, sec, 1); bitmap_file_set_bit(bitmap, sec); + if (sec < bitmap->mddev->recovery_cp) + /* We are asserting that the array is dirty, + * so move the recovery_cp address back so + * that it is obvious that it is dirty + */ + bitmap->mddev->recovery_cp = sec; } } @@ -1531,7 +1581,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) void bitmap_flush(mddev_t *mddev) { struct bitmap *bitmap = mddev->bitmap; - int sleep; + long sleep; if (!bitmap) /* there was no bitmap */ return; @@ -1539,12 +1589,13 @@ void bitmap_flush(mddev_t *mddev) /* run the daemon_work three time to ensure everything is flushed * that can be */ - sleep = bitmap->daemon_sleep; - bitmap->daemon_sleep = 0; - bitmap_daemon_work(bitmap); - bitmap_daemon_work(bitmap); - bitmap_daemon_work(bitmap); - bitmap->daemon_sleep = sleep; + sleep = mddev->bitmap_info.daemon_sleep * 2; + bitmap->daemon_lastrun -= sleep; + bitmap_daemon_work(mddev); + bitmap->daemon_lastrun -= sleep; + bitmap_daemon_work(mddev); + bitmap->daemon_lastrun -= sleep; + bitmap_daemon_work(mddev); bitmap_update_sb(bitmap); } @@ -1574,6 +1625,7 @@ static void bitmap_free(struct bitmap *bitmap) kfree(bp); kfree(bitmap); } + void bitmap_destroy(mddev_t *mddev) { struct bitmap *bitmap = mddev->bitmap; @@ -1581,10 +1633,15 @@ void bitmap_destroy(mddev_t *mddev) if (!bitmap) /* there was no bitmap */ return; + mutex_lock(&mddev->bitmap_info.mutex); mddev->bitmap = NULL; /* disconnect from the md device */ + mutex_unlock(&mddev->bitmap_info.mutex); if (mddev->thread) mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; + if (bitmap->sysfs_can_clear) + sysfs_put(bitmap->sysfs_can_clear); + bitmap_free(bitmap); } @@ -1598,16 +1655,17 @@ int bitmap_create(mddev_t *mddev) sector_t blocks = mddev->resync_max_sectors; unsigned long chunks; unsigned long pages; - struct file *file = mddev->bitmap_file; + struct file *file = mddev->bitmap_info.file; int err; sector_t start; + struct sysfs_dirent *bm; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); - if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */ + if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */ return 0; - BUG_ON(file && mddev->bitmap_offset); + BUG_ON(file && mddev->bitmap_info.offset); bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) @@ -1620,8 +1678,14 @@ int bitmap_create(mddev_t *mddev) bitmap->mddev = mddev; + bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap"); + if (bm) { + bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear"); + sysfs_put(bm); + } else + bitmap->sysfs_can_clear = NULL; + bitmap->file = file; - bitmap->offset = mddev->bitmap_offset; if (file) { get_file(file); /* As future accesses to this file will use bmap, @@ -1630,12 +1694,22 @@ int bitmap_create(mddev_t *mddev) */ vfs_fsync(file, file->f_dentry, 1); } - /* read superblock from bitmap file (this sets bitmap->chunksize) */ - err = bitmap_read_sb(bitmap); + /* read superblock from bitmap file (this sets 
mddev->bitmap_info.chunksize) */ + if (!mddev->bitmap_info.external) + err = bitmap_read_sb(bitmap); + else { + err = 0; + if (mddev->bitmap_info.chunksize == 0 || + mddev->bitmap_info.daemon_sleep == 0) + /* chunksize and time_base need to be + * set first. */ + err = -EINVAL; + } if (err) goto error; - bitmap->chunkshift = ffz(~bitmap->chunksize); + bitmap->daemon_lastrun = jiffies; + bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize); /* now that chunksize and chunkshift are set, we can use these macros */ chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >> @@ -1677,7 +1751,8 @@ int bitmap_create(mddev_t *mddev) mddev->bitmap = bitmap; - mddev->thread->timeout = bitmap->daemon_sleep * HZ; + mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; + md_wakeup_thread(mddev->thread); bitmap_update_sb(bitmap); @@ -1688,6 +1763,264 @@ int bitmap_create(mddev_t *mddev) return err; } +static ssize_t +location_show(mddev_t *mddev, char *page) +{ + ssize_t len; + if (mddev->bitmap_info.file) { + len = sprintf(page, "file"); + } else if (mddev->bitmap_info.offset) { + len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); + } else + len = sprintf(page, "none"); + len += sprintf(page+len, "\n"); + return len; +} + +static ssize_t +location_store(mddev_t *mddev, const char *buf, size_t len) +{ + + if (mddev->pers) { + if (!mddev->pers->quiesce) + return -EBUSY; + if (mddev->recovery || mddev->sync_thread) + return -EBUSY; + } + + if (mddev->bitmap || mddev->bitmap_info.file || + mddev->bitmap_info.offset) { + /* bitmap already configured. Only option is to clear it */ + if (strncmp(buf, "none", 4) != 0) + return -EBUSY; + if (mddev->pers) { + mddev->pers->quiesce(mddev, 1); + bitmap_destroy(mddev); + mddev->pers->quiesce(mddev, 0); + } + mddev->bitmap_info.offset = 0; + if (mddev->bitmap_info.file) { + struct file *f = mddev->bitmap_info.file; + mddev->bitmap_info.file = NULL; + restore_bitmap_write_access(f); + fput(f); + } + } else { + /* No bitmap, OK to set a location */ + long long offset; + if (strncmp(buf, "none", 4) == 0) + /* nothing to be done */; + else if (strncmp(buf, "file:", 5) == 0) { + /* Not supported yet */ + return -EINVAL; + } else { + int rv; + if (buf[0] == '+') + rv = strict_strtoll(buf+1, 10, &offset); + else + rv = strict_strtoll(buf, 10, &offset); + if (rv) + return rv; + if (offset == 0) + return -EINVAL; + if (mddev->bitmap_info.external == 0 && + mddev->major_version == 0 && + offset != mddev->bitmap_info.default_offset) + return -EINVAL; + mddev->bitmap_info.offset = offset; + if (mddev->pers) { + mddev->pers->quiesce(mddev, 1); + rv = bitmap_create(mddev); + if (rv) { + bitmap_destroy(mddev); + mddev->bitmap_info.offset = 0; + } + mddev->pers->quiesce(mddev, 0); + if (rv) + return rv; + } + } + } + if (!mddev->external) { + /* Ensure new bitmap info is stored in + * metadata promptly. 
+ */ + set_bit(MD_CHANGE_DEVS, &mddev->flags); + md_wakeup_thread(mddev->thread); + } + return len; +} + +static struct md_sysfs_entry bitmap_location = +__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); + +static ssize_t +timeout_show(mddev_t *mddev, char *page) +{ + ssize_t len; + unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; + unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; + + len = sprintf(page, "%lu", secs); + if (jifs) + len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); + len += sprintf(page+len, "\n"); + return len; +} + +static ssize_t +timeout_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* timeout can be set at any time */ + unsigned long timeout; + int rv = strict_strtoul_scaled(buf, &timeout, 4); + if (rv) + return rv; + + /* just to make sure we don't overflow... */ + if (timeout >= LONG_MAX / HZ) + return -EINVAL; + + timeout = timeout * HZ / 10000; + + if (timeout >= MAX_SCHEDULE_TIMEOUT) + timeout = MAX_SCHEDULE_TIMEOUT-1; + if (timeout < 1) + timeout = 1; + mddev->bitmap_info.daemon_sleep = timeout; + if (mddev->thread) { + /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then + * the bitmap is all clean and we don't need to + * adjust the timeout right now + */ + if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) { + mddev->thread->timeout = timeout; + md_wakeup_thread(mddev->thread); + } + } + return len; +} + +static struct md_sysfs_entry bitmap_timeout = +__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store); + +static ssize_t +backlog_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); +} + +static ssize_t +backlog_store(mddev_t *mddev, const char *buf, size_t len) +{ + unsigned long backlog; + int rv = strict_strtoul(buf, 10, &backlog); + if (rv) + return rv; + if (backlog > COUNTER_MAX) + return -EINVAL; + mddev->bitmap_info.max_write_behind = backlog; + return len; +} + +static struct md_sysfs_entry bitmap_backlog = +__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store); + +static ssize_t +chunksize_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); +} + +static ssize_t +chunksize_store(mddev_t *mddev, const char *buf, size_t len) +{ + /* Can only be changed when no bitmap is active */ + int rv; + unsigned long csize; + if (mddev->bitmap) + return -EBUSY; + rv = strict_strtoul(buf, 10, &csize); + if (rv) + return rv; + if (csize < 512 || + !is_power_of_2(csize)) + return -EINVAL; + mddev->bitmap_info.chunksize = csize; + return len; +} + +static struct md_sysfs_entry bitmap_chunksize = +__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store); + +static ssize_t metadata_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%s\n", (mddev->bitmap_info.external + ? "external" : "internal")); +} + +static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len) +{ + if (mddev->bitmap || + mddev->bitmap_info.file || + mddev->bitmap_info.offset) + return -EBUSY; + if (strncmp(buf, "external", 8) == 0) + mddev->bitmap_info.external = 1; + else if (strncmp(buf, "internal", 8) == 0) + mddev->bitmap_info.external = 0; + else + return -EINVAL; + return len; +} + +static struct md_sysfs_entry bitmap_metadata = +__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); + +static ssize_t can_clear_show(mddev_t *mddev, char *page) +{ + int len; + if (mddev->bitmap) + len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? 
+ "false" : "true")); + else + len = sprintf(page, "\n"); + return len; +} + +static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len) +{ + if (mddev->bitmap == NULL) + return -ENOENT; + if (strncmp(buf, "false", 5) == 0) + mddev->bitmap->need_sync = 1; + else if (strncmp(buf, "true", 4) == 0) { + if (mddev->degraded) + return -EBUSY; + mddev->bitmap->need_sync = 0; + } else + return -EINVAL; + return len; +} + +static struct md_sysfs_entry bitmap_can_clear = +__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store); + +static struct attribute *md_bitmap_attrs[] = { + &bitmap_location.attr, + &bitmap_timeout.attr, + &bitmap_backlog.attr, + &bitmap_chunksize.attr, + &bitmap_metadata.attr, + &bitmap_can_clear.attr, + NULL +}; +struct attribute_group md_bitmap_group = { + .name = "bitmap", + .attrs = md_bitmap_attrs, +}; + + /* the bitmap API -- for raid personalities */ EXPORT_SYMBOL(bitmap_startwrite); EXPORT_SYMBOL(bitmap_endwrite); diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index e98900671ca..cb821d76d1b 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -106,7 +106,7 @@ typedef __u16 bitmap_counter_t; #define BITMAP_BLOCK_SHIFT 9 /* how many blocks per chunk? (this is variable) */ -#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) +#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT) #define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) #define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) @@ -118,16 +118,6 @@ typedef __u16 bitmap_counter_t; (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) #define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) -/* - * on-disk bitmap: - * - * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap - * file a page at a time. There's a superblock at the start of the file. - */ - -/* map chunks (bits) to file pages - offset by the size of the superblock */ -#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) - #endif /* @@ -209,7 +199,6 @@ struct bitmap { int counter_bits; /* how many bits per block counter */ /* bitmap chunksize -- how much data does each bit represent? */ - unsigned long chunksize; unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ unsigned long chunks; /* total number of data chunks for the array */ @@ -226,7 +215,6 @@ struct bitmap { /* bitmap spinlock */ spinlock_t lock; - long offset; /* offset from superblock if file is NULL */ struct file *file; /* backing disk file */ struct page *sb_page; /* cached copy of the bitmap file superblock */ struct page **filemap; /* list of cache pages for the file */ @@ -238,7 +226,6 @@ struct bitmap { int allclean; - unsigned long max_write_behind; /* write-behind mode */ atomic_t behind_writes; /* @@ -246,7 +233,6 @@ struct bitmap { * file, cleaning up bits and flushing out pages to disk as necessary */ unsigned long daemon_lastrun; /* jiffies of last run */ - unsigned long daemon_sleep; /* how many seconds between updates? 
*/ unsigned long last_end_sync; /* when we lasted called end_sync to * update bitmap with resync progress */ @@ -254,6 +240,7 @@ struct bitmap { wait_queue_head_t write_wait; wait_queue_head_t overflow_wait; + struct sysfs_dirent *sysfs_can_clear; }; /* the bitmap API */ @@ -282,7 +269,7 @@ void bitmap_close_sync(struct bitmap *bitmap); void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); void bitmap_unplug(struct bitmap *bitmap); -void bitmap_daemon_work(struct bitmap *bitmap); +void bitmap_daemon_work(mddev_t *mddev); #endif #endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ed103816401..a93637223c8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2003 Christophe Saout <christophe@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> - * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -71,10 +71,21 @@ struct crypt_iv_operations { int (*ctr)(struct crypt_config *cc, struct dm_target *ti, const char *opts); void (*dtr)(struct crypt_config *cc); - const char *(*status)(struct crypt_config *cc); + int (*init)(struct crypt_config *cc); + int (*wipe)(struct crypt_config *cc); int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); }; +struct iv_essiv_private { + struct crypto_cipher *tfm; + struct crypto_hash *hash_tfm; + u8 *salt; +}; + +struct iv_benbi_private { + int shift; +}; + /* * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. @@ -102,8 +113,8 @@ struct crypt_config { struct crypt_iv_operations *iv_gen_ops; char *iv_mode; union { - struct crypto_cipher *essiv_tfm; - int benbi_shift; + struct iv_essiv_private essiv; + struct iv_benbi_private benbi; } iv_gen_private; sector_t iv_offset; unsigned int iv_size; @@ -147,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io); * plain: the initial vector is the 32-bit little-endian version of the sector * number, padded with zeros if necessary. * + * plain64: the initial vector is the 64-bit little-endian version of the sector + * number, padded with zeros if necessary. + * * essiv: "encrypted sector|salt initial vector", the sector number is * encrypted with the bulk cipher using a salt as key. The salt * should be derived from the bulk cipher's key via hashing. 
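For context on why plain64 is introduced, a small standalone sketch (not dm-crypt code; a little-endian host and a 16-byte IV are assumed): with "plain" only the low 32 bits of the sector number reach the IV, so sectors 2^32 apart get identical IVs, while "plain64" keeps the whole 64-bit sector number.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace sketch of the two generators described above. */
static void iv_plain(uint8_t iv[16], uint64_t sector)
{
	uint32_t s32 = (uint32_t)sector;   /* low 32 bits only */

	memset(iv, 0, 16);
	memcpy(iv, &s32, sizeof(s32));     /* little-endian host assumed */
}

static void iv_plain64(uint8_t iv[16], uint64_t sector)
{
	memset(iv, 0, 16);
	memcpy(iv, &sector, sizeof(sector));
}

int main(void)
{
	uint8_t a[16], b[16];
	uint64_t sector = (1ULL << 32) + 7;   /* beyond 2^32 sectors */

	iv_plain(a, sector);
	iv_plain(b, 7);
	printf("plain repeats IV: %d\n", memcmp(a, b, 16) == 0);

	iv_plain64(a, sector);
	iv_plain64(b, 7);
	printf("plain64 repeats IV: %d\n", memcmp(a, b, 16) == 0);
	return 0;
}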
@@ -169,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) return 0; } -static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, - const char *opts) +static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, + sector_t sector) { - struct crypto_cipher *essiv_tfm; - struct crypto_hash *hash_tfm; + memset(iv, 0, cc->iv_size); + *(u64 *)iv = cpu_to_le64(sector); + + return 0; +} + +/* Initialise ESSIV - compute salt but no local memory allocations */ +static int crypt_iv_essiv_init(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; struct hash_desc desc; struct scatterlist sg; - unsigned int saltsize; - u8 *salt; int err; - if (opts == NULL) { + sg_init_one(&sg, cc->key, cc->key_size); + desc.tfm = essiv->hash_tfm; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); + if (err) + return err; + + return crypto_cipher_setkey(essiv->tfm, essiv->salt, + crypto_hash_digestsize(essiv->hash_tfm)); +} + +/* Wipe salt and reset key derived from volume key */ +static int crypt_iv_essiv_wipe(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; + unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); + + memset(essiv->salt, 0, salt_size); + + return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); +} + +static void crypt_iv_essiv_dtr(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; + + crypto_free_cipher(essiv->tfm); + essiv->tfm = NULL; + + crypto_free_hash(essiv->hash_tfm); + essiv->hash_tfm = NULL; + + kzfree(essiv->salt); + essiv->salt = NULL; +} + +static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct crypto_cipher *essiv_tfm = NULL; + struct crypto_hash *hash_tfm = NULL; + u8 *salt = NULL; + int err; + + if (!opts) { ti->error = "Digest algorithm missing for ESSIV mode"; return -EINVAL; } - /* Hash the cipher key with the given hash algorithm */ + /* Allocate hash algorithm */ hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hash_tfm)) { ti->error = "Error initializing ESSIV hash"; - return PTR_ERR(hash_tfm); + err = PTR_ERR(hash_tfm); + goto bad; } - saltsize = crypto_hash_digestsize(hash_tfm); - salt = kmalloc(saltsize, GFP_KERNEL); - if (salt == NULL) { + salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); + if (!salt) { ti->error = "Error kmallocing salt storage in ESSIV"; - crypto_free_hash(hash_tfm); - return -ENOMEM; + err = -ENOMEM; + goto bad; } - sg_init_one(&sg, cc->key, cc->key_size); - desc.tfm = hash_tfm; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); - crypto_free_hash(hash_tfm); - - if (err) { - ti->error = "Error calculating hash in ESSIV"; - kfree(salt); - return err; - } - - /* Setup the essiv_tfm with the given salt */ + /* Allocate essiv_tfm */ essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(essiv_tfm)) { ti->error = "Error allocating crypto tfm for ESSIV"; - kfree(salt); - return PTR_ERR(essiv_tfm); + err = PTR_ERR(essiv_tfm); + goto bad; } if (crypto_cipher_blocksize(essiv_tfm) != crypto_ablkcipher_ivsize(cc->tfm)) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; - crypto_free_cipher(essiv_tfm); - kfree(salt); - return -EINVAL; + err = -EINVAL; + goto bad; } - err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); - if 
(err) { - ti->error = "Failed to set key for ESSIV cipher"; - crypto_free_cipher(essiv_tfm); - kfree(salt); - return err; - } - kfree(salt); - cc->iv_gen_private.essiv_tfm = essiv_tfm; + cc->iv_gen_private.essiv.salt = salt; + cc->iv_gen_private.essiv.tfm = essiv_tfm; + cc->iv_gen_private.essiv.hash_tfm = hash_tfm; + return 0; -} -static void crypt_iv_essiv_dtr(struct crypt_config *cc) -{ - crypto_free_cipher(cc->iv_gen_private.essiv_tfm); - cc->iv_gen_private.essiv_tfm = NULL; +bad: + if (essiv_tfm && !IS_ERR(essiv_tfm)) + crypto_free_cipher(essiv_tfm); + if (hash_tfm && !IS_ERR(hash_tfm)) + crypto_free_hash(hash_tfm); + kfree(salt); + return err; } static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) { memset(iv, 0, cc->iv_size); *(u64 *)iv = cpu_to_le64(sector); - crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); + crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); return 0; } @@ -273,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - cc->iv_gen_private.benbi_shift = 9 - log; + cc->iv_gen_private.benbi.shift = 9 - log; return 0; } @@ -288,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ - val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); + val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); return 0; @@ -305,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; +static struct crypt_iv_operations crypt_iv_plain64_ops = { + .generator = crypt_iv_plain64_gen +}; + static struct crypt_iv_operations crypt_iv_essiv_ops = { .ctr = crypt_iv_essiv_ctr, .dtr = crypt_iv_essiv_dtr, + .init = crypt_iv_essiv_init, + .wipe = crypt_iv_essiv_wipe, .generator = crypt_iv_essiv_gen }; @@ -934,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key) set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - return 0; + return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); } static int crypt_wipe_key(struct crypt_config *cc) { clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); memset(&cc->key, 0, cc->key_size * sizeof(u8)); - return 0; + return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); } /* @@ -983,12 +1038,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -ENOMEM; } - if (crypt_set_key(cc, argv[1])) { - ti->error = "Error decoding key"; - goto bad_cipher; - } - - /* Compatiblity mode for old dm-crypt cipher strings */ + /* Compatibility mode for old dm-crypt cipher strings */ if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { chainmode = "cbc"; ivmode = "plain"; @@ -1015,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) strcpy(cc->chainmode, chainmode); cc->tfm = tfm; + if (crypt_set_key(cc, argv[1]) < 0) { + ti->error = "Error decoding and setting key"; + goto bad_ivmode; + } + /* * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". 
* See comments at iv code @@ -1024,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops = NULL; else if (strcmp(ivmode, "plain") == 0) cc->iv_gen_ops = &crypt_iv_plain_ops; + else if (strcmp(ivmode, "plain64") == 0) + cc->iv_gen_ops = &crypt_iv_plain64_ops; else if (strcmp(ivmode, "essiv") == 0) cc->iv_gen_ops = &crypt_iv_essiv_ops; else if (strcmp(ivmode, "benbi") == 0) @@ -1039,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad_ivmode; + if (cc->iv_gen_ops && cc->iv_gen_ops->init && + cc->iv_gen_ops->init(cc) < 0) { + ti->error = "Error initialising IV"; + goto bad_slab_pool; + } + cc->iv_size = crypto_ablkcipher_ivsize(tfm); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ @@ -1085,11 +1148,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_bs; } - if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { - ti->error = "Error setting key"; - goto bad_device; - } - if (sscanf(argv[2], "%llu", &tmpll) != 1) { ti->error = "Invalid iv_offset sector"; goto bad_device; @@ -1278,6 +1336,7 @@ static void crypt_resume(struct dm_target *ti) static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) { struct crypt_config *cc = ti->private; + int ret = -EINVAL; if (argc < 2) goto error; @@ -1287,10 +1346,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) DMWARN("not suspended during key manipulation."); return -EINVAL; } - if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) - return crypt_set_key(cc, argv[2]); - if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) + if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { + ret = crypt_set_key(cc, argv[2]); + if (ret) + return ret; + if (cc->iv_gen_ops && cc->iv_gen_ops->init) + ret = cc->iv_gen_ops->init(cc); + return ret; + } + if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { + if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { + ret = cc->iv_gen_ops->wipe(cc); + if (ret) + return ret; + } return crypt_wipe_key(cc); + } } error: diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 7dbe652efb5..2b7907b6dd0 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -172,7 +172,8 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, } /* Validate the chunk size against the device block size */ - if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) { + if (chunk_size % + (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) { *error = "Chunk size is not a multiple of device blocksize"; return -EINVAL; } @@ -190,6 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, } int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, + struct dm_snapshot *snap, unsigned *args_used, struct dm_exception_store **store) { @@ -198,7 +200,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, struct dm_exception_store *tmp_store; char persistent; - if (argc < 3) { + if (argc < 2) { ti->error = "Insufficient exception store arguments"; return -EINVAL; } @@ -209,14 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -ENOMEM; } - persistent = toupper(*argv[1]); + persistent = toupper(*argv[0]); if (persistent == 'P') type = get_type("P"); else if (persistent == 'N') type = get_type("N"); else { 
ti->error = "Persistent flag is not P or N"; - return -EINVAL; + r = -EINVAL; + goto bad_type; } if (!type) { @@ -226,32 +229,23 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, } tmp_store->type = type; - tmp_store->ti = ti; - - r = dm_get_device(ti, argv[0], 0, 0, - FMODE_READ | FMODE_WRITE, &tmp_store->cow); - if (r) { - ti->error = "Cannot get COW device"; - goto bad_cow; - } + tmp_store->snap = snap; - r = set_chunk_size(tmp_store, argv[2], &ti->error); + r = set_chunk_size(tmp_store, argv[1], &ti->error); if (r) - goto bad_ctr; + goto bad; r = type->ctr(tmp_store, 0, NULL); if (r) { ti->error = "Exception store type constructor failed"; - goto bad_ctr; + goto bad; } - *args_used = 3; + *args_used = 2; *store = tmp_store; return 0; -bad_ctr: - dm_put_device(ti, tmp_store->cow); -bad_cow: +bad: put_type(type); bad_type: kfree(tmp_store); @@ -262,7 +256,6 @@ EXPORT_SYMBOL(dm_exception_store_create); void dm_exception_store_destroy(struct dm_exception_store *store) { store->type->dtr(store); - dm_put_device(store->ti, store->cow); put_type(store->type); kfree(store); } diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 8a223a48802..e8dfa06af3b 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -26,7 +26,7 @@ typedef sector_t chunk_t; * of chunks that follow contiguously. Remaining bits hold the number of the * chunk within the device. */ -struct dm_snap_exception { +struct dm_exception { struct list_head hash_list; chunk_t old_chunk; @@ -64,17 +64,34 @@ struct dm_exception_store_type { * Find somewhere to store the next exception. */ int (*prepare_exception) (struct dm_exception_store *store, - struct dm_snap_exception *e); + struct dm_exception *e); /* * Update the metadata with this exception. */ void (*commit_exception) (struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context); /* + * Returns 0 if the exception store is empty. + * + * If there are exceptions still to be merged, sets + * *last_old_chunk and *last_new_chunk to the most recent + * still-to-be-merged chunk and returns the number of + * consecutive previous ones. + */ + int (*prepare_merge) (struct dm_exception_store *store, + chunk_t *last_old_chunk, chunk_t *last_new_chunk); + + /* + * Clear the last n exceptions. + * nr_merged must be <= the value returned by prepare_merge. + */ + int (*commit_merge) (struct dm_exception_store *store, int nr_merged); + + /* * The snapshot is invalid, note this in the metadata. */ void (*drop_snapshot) (struct dm_exception_store *store); @@ -86,19 +103,19 @@ struct dm_exception_store_type { /* * Return how full the snapshot is. */ - void (*fraction_full) (struct dm_exception_store *store, - sector_t *numerator, - sector_t *denominator); + void (*usage) (struct dm_exception_store *store, + sector_t *total_sectors, sector_t *sectors_allocated, + sector_t *metadata_sectors); /* For internal device-mapper use only. */ struct list_head list; }; +struct dm_snapshot; + struct dm_exception_store { struct dm_exception_store_type *type; - struct dm_target *ti; - - struct dm_dev *cow; + struct dm_snapshot *snap; /* Size of data blocks saved - must be a power of 2 */ unsigned chunk_size; @@ -109,6 +126,11 @@ struct dm_exception_store { }; /* + * Obtain the cow device used by a given snapshot. 
+ */ +struct dm_dev *dm_snap_cow(struct dm_snapshot *snap); + +/* * Funtions to manipulate consecutive chunks */ # if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) @@ -120,18 +142,25 @@ static inline chunk_t dm_chunk_number(chunk_t chunk) return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL); } -static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e) { return e->new_chunk >> DM_CHUNK_NUMBER_BITS; } -static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e) { e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS); BUG_ON(!dm_consecutive_chunk_count(e)); } +static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e) +{ + BUG_ON(!dm_consecutive_chunk_count(e)); + + e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS); +} + # else # define DM_CHUNK_CONSECUTIVE_BITS 0 @@ -140,12 +169,16 @@ static inline chunk_t dm_chunk_number(chunk_t chunk) return chunk; } -static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e) { return 0; } -static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e) +{ +} + +static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e) { } @@ -162,7 +195,7 @@ static inline sector_t get_dev_size(struct block_device *bdev) static inline chunk_t sector_to_chunk(struct dm_exception_store *store, sector_t sector) { - return (sector & ~store->chunk_mask) >> store->chunk_shift; + return sector >> store->chunk_shift; } int dm_exception_store_type_register(struct dm_exception_store_type *type); @@ -173,6 +206,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, char **error); int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, + struct dm_snapshot *snap, unsigned *args_used, struct dm_exception_store **store); void dm_exception_store_destroy(struct dm_exception_store *store); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3a2e6a2f8bd..10f457ca6af 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -5,6 +5,8 @@ * This file is released under the GPL. */ +#include "dm.h" + #include <linux/device-mapper.h> #include <linux/bio.h> @@ -14,12 +16,19 @@ #include <linux/slab.h> #include <linux/dm-io.h> +#define DM_MSG_PREFIX "io" + +#define DM_IO_MAX_REGIONS BITS_PER_LONG + struct dm_io_client { mempool_t *pool; struct bio_set *bios; }; -/* FIXME: can we shrink this ? */ +/* + * Aligning 'struct io' reduces the number of bits required to store + * its address. Refer to store_io_and_region_in_bio() below. 
+ */ struct io { unsigned long error_bits; unsigned long eopnotsupp_bits; @@ -28,7 +37,9 @@ struct io { struct dm_io_client *client; io_notify_fn callback; void *context; -}; +} __attribute__((aligned(DM_IO_MAX_REGIONS))); + +static struct kmem_cache *_dm_io_cache; /* * io contexts are only dynamically allocated for asynchronous @@ -53,7 +64,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages) if (!client) return ERR_PTR(-ENOMEM); - client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io)); + client->pool = mempool_create_slab_pool(ios, _dm_io_cache); if (!client->pool) goto bad; @@ -88,18 +99,29 @@ EXPORT_SYMBOL(dm_io_client_destroy); /*----------------------------------------------------------------- * We need to keep track of which region a bio is doing io for. - * In order to save a memory allocation we store this the last - * bvec which we know is unused (blech). - * XXX This is ugly and can OOPS with some configs... find another way. + * To avoid a memory allocation to store just 5 or 6 bits, we + * ensure the 'struct io' pointer is aligned so enough low bits are + * always zero and then combine it with the region number directly in + * bi_private. *---------------------------------------------------------------*/ -static inline void bio_set_region(struct bio *bio, unsigned region) +static void store_io_and_region_in_bio(struct bio *bio, struct io *io, + unsigned region) { - bio->bi_io_vec[bio->bi_max_vecs].bv_len = region; + if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) { + DMCRIT("Unaligned struct io pointer %p", io); + BUG(); + } + + bio->bi_private = (void *)((unsigned long)io | region); } -static inline unsigned bio_get_region(struct bio *bio) +static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, + unsigned *region) { - return bio->bi_io_vec[bio->bi_max_vecs].bv_len; + unsigned long val = (unsigned long)bio->bi_private; + + *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS); + *region = val & (DM_IO_MAX_REGIONS - 1); } /*----------------------------------------------------------------- @@ -140,10 +162,8 @@ static void endio(struct bio *bio, int error) /* * The bio destructor in bio_put() may use the io object. */ - io = bio->bi_private; - region = bio_get_region(bio); + retrieve_io_and_region_from_bio(bio, &io, ®ion); - bio->bi_max_vecs++; bio_put(bio); dec_count(io, region, error); @@ -243,7 +263,10 @@ static void vm_dp_init(struct dpages *dp, void *data) static void dm_bio_destructor(struct bio *bio) { - struct io *io = bio->bi_private; + unsigned region; + struct io *io; + + retrieve_io_and_region_from_bio(bio, &io, ®ion); bio_free(bio, io->client->bios); } @@ -286,26 +309,23 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, unsigned num_bvecs; sector_t remaining = where->count; - while (remaining) { + /* + * where->count may be zero if rw holds a write barrier and we + * need to send a zero-sized barrier. + */ + do { /* - * Allocate a suitably sized-bio: we add an extra - * bvec for bio_get/set_region() and decrement bi_max_vecs - * to hide it from bio_add_page(). + * Allocate a suitably sized-bio. 
*/ num_bvecs = dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)); - num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev), - num_bvecs); - if (unlikely(num_bvecs > BIO_MAX_PAGES)) - num_bvecs = BIO_MAX_PAGES; + num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); bio->bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; - bio->bi_private = io; bio->bi_destructor = dm_bio_destructor; - bio->bi_max_vecs--; - bio_set_region(bio, region); + store_io_and_region_in_bio(bio, io, region); /* * Try and add as many pages as possible. @@ -323,7 +343,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, atomic_inc(&io->count); submit_bio(rw, bio); - } + } while (remaining); } static void dispatch_io(int rw, unsigned int num_regions, @@ -333,6 +353,8 @@ static void dispatch_io(int rw, unsigned int num_regions, int i; struct dpages old_pages = *dp; + BUG_ON(num_regions > DM_IO_MAX_REGIONS); + if (sync) rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); @@ -342,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count) + if (where[i].count || (rw & (1 << BIO_RW_BARRIER))) do_region(rw, i, where + i, dp, io); } @@ -357,7 +379,14 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, struct dm_io_region *where, int rw, struct dpages *dp, unsigned long *error_bits) { - struct io io; + /* + * gcc <= 4.3 can't do the alignment for stack variables, so we must + * align it on our own. + * volatile prevents the optimizer from removing or reusing + * "io_" field from the stack frame (allowed in ANSI C). + */ + volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; + struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -365,33 +394,33 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, } retry: - io.error_bits = 0; - io.eopnotsupp_bits = 0; - atomic_set(&io.count, 1); /* see dispatch_io() */ - io.sleeper = current; - io.client = client; + io->error_bits = 0; + io->eopnotsupp_bits = 0; + atomic_set(&io->count, 1); /* see dispatch_io() */ + io->sleeper = current; + io->client = client; - dispatch_io(rw, num_regions, where, dp, &io, 1); + dispatch_io(rw, num_regions, where, dp, io, 1); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); - if (!atomic_read(&io.count)) + if (!atomic_read(&io->count)) break; io_schedule(); } set_current_state(TASK_RUNNING); - if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { + if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { rw &= ~(1 << BIO_RW_BARRIER); goto retry; } if (error_bits) - *error_bits = io.error_bits; + *error_bits = io->error_bits; - return io.error_bits ? -EIO : 0; + return io->error_bits ? 
-EIO : 0; } static int async_io(struct dm_io_client *client, unsigned int num_regions, @@ -472,3 +501,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, &dp, io_req->notify.fn, io_req->notify.context); } EXPORT_SYMBOL(dm_io); + +int __init dm_io_init(void) +{ + _dm_io_cache = KMEM_CACHE(io, 0); + if (!_dm_io_cache) + return -ENOMEM; + + return 0; +} + +void dm_io_exit(void) +{ + kmem_cache_destroy(_dm_io_cache); + _dm_io_cache = NULL; +} diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a6794293158..1d669322b27 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -56,6 +56,11 @@ static void dm_hash_remove_all(int keep_open_devices); */ static DECLARE_RWSEM(_hash_lock); +/* + * Protects use of mdptr to obtain hash cell name and uuid from mapped device. + */ +static DEFINE_MUTEX(dm_hash_cells_mutex); + static void init_buckets(struct list_head *buckets) { unsigned int i; @@ -206,7 +211,9 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); } dm_get(md); + mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(md, cell); + mutex_unlock(&dm_hash_cells_mutex); up_write(&_hash_lock); return 0; @@ -224,9 +231,11 @@ static void __hash_remove(struct hash_cell *hc) /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); + mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(hc->md, NULL); + mutex_unlock(&dm_hash_cells_mutex); - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -321,13 +330,15 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) */ list_del(&hc->name_list); old_name = hc->name; + mutex_lock(&dm_hash_cells_mutex); hc->name = new_name; + mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->name_list, _name_buckets + hash_str(new_name)); /* * Wake up any dm event waiters. */ - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -512,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size) return 0; } - - static int check_name(const char *name) { if (strchr(name, '/')) { @@ -525,6 +534,40 @@ static int check_name(const char *name) } /* + * On successful return, the caller must not attempt to acquire + * _hash_lock without first calling dm_table_put, because dm_table_destroy + * waits for this dm_table_put and could be called under this lock. + */ +static struct dm_table *dm_get_inactive_table(struct mapped_device *md) +{ + struct hash_cell *hc; + struct dm_table *table = NULL; + + down_read(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + DMWARN("device has been removed from the dev hash table."); + goto out; + } + + table = hc->new_map; + if (table) + dm_table_get(table); + +out: + up_read(&_hash_lock); + + return table; +} + +static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, + struct dm_ioctl *param) +{ + return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? + dm_get_inactive_table(md) : dm_get_live_table(md); +} + +/* * Fills in a dm_ioctl structure, ready for sending back to * userland. 
*/ @@ -536,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG); - if (dm_suspended(md)) + if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; param->dev = huge_encode_dev(disk_devt(disk)); @@ -548,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) */ param->open_count = dm_open_count(md); - if (get_disk_ro(disk)) - param->flags |= DM_READONLY_FLAG; - param->event_nr = dm_get_event_nr(md); + param->target_count = 0; - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { - param->flags |= DM_ACTIVE_PRESENT_FLAG; - param->target_count = dm_table_get_num_targets(table); + if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { + if (get_disk_ro(disk)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + } dm_table_put(table); - } else - param->target_count = 0; + + param->flags |= DM_ACTIVE_PRESENT_FLAG; + } + + if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { + table = dm_get_inactive_table(md); + if (table) { + if (!(dm_table_get_mode(table) & FMODE_WRITE)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + dm_table_put(table); + } + } return 0; } @@ -634,9 +689,9 @@ static struct mapped_device *find_device(struct dm_ioctl *param) * Sneakily write in both the name and the uuid * while we have the cell. */ - strncpy(param->name, hc->name, sizeof(param->name)); + strlcpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid) - strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1); + strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); else param->uuid[0] = '\0'; @@ -784,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param) if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) r = dm_suspend(md, suspend_flags); if (!r) @@ -800,7 +855,7 @@ static int do_resume(struct dm_ioctl *param) unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; - struct dm_table *new_map; + struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); @@ -826,14 +881,14 @@ static int do_resume(struct dm_ioctl *param) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); - r = dm_swap_table(md, new_map); - if (r) { + old_map = dm_swap_table(md, new_map); + if (IS_ERR(old_map)) { dm_table_destroy(new_map); dm_put(md); - return r; + return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) @@ -842,9 +897,11 @@ static int do_resume(struct dm_ioctl *param) set_disk_ro(dm_disk(md), 1); } - if (dm_suspended(md)) + if (dm_suspended_md(md)) r = dm_resume(md); + if (old_map) + dm_table_destroy(old_map); if (!r) { dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); @@ -982,7 +1039,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); @@ -1215,7 +1272,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_deps(table, param, param_size); dm_table_put(table); @@ 
-1244,13 +1301,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); } - out: +out: dm_put(md); return r; } @@ -1288,10 +1345,15 @@ static int target_message(struct dm_ioctl *param, size_t param_size) goto out; } - table = dm_get_table(md); + table = dm_get_live_table(md); if (!table) goto out_argv; + if (dm_deleting_md(md)) { + r = -ENXIO; + goto out_table; + } + ti = dm_table_find_target(table, tmsg->sector); if (!dm_target_is_valid(ti)) { DMWARN("Target message sector outside device."); @@ -1303,6 +1365,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size) r = -EINVAL; } + out_table: dm_table_put(table); out_argv: kfree(argv); @@ -1582,8 +1645,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) if (!md) return -ENXIO; - dm_get(md); - down_read(&_hash_lock); + mutex_lock(&dm_hash_cells_mutex); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { r = -ENXIO; @@ -1596,8 +1658,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) strcpy(uuid, hc->uuid ? : ""); out: - up_read(&_hash_lock); - dm_put(md); + mutex_unlock(&dm_hash_cells_mutex); return r; } diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 3e3fc06cb86..addf8347504 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -450,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job) { struct dm_kcopyd_client *kc = job->kc; atomic_inc(&kc->nr_jobs); - push(&kc->pages_jobs, job); + if (unlikely(!job->source.count)) + push(&kc->complete_jobs, job); + else + push(&kc->pages_jobs, job); wake(kc); } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 9443896ede0..7035582786f 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type) EXPORT_SYMBOL(dm_dirty_log_type_unregister); struct dm_dirty_log *dm_dirty_log_create(const char *type_name, - struct dm_target *ti, - unsigned int argc, char **argv) + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned int argc, char **argv) { struct dm_dirty_log_type *type; struct dm_dirty_log *log; @@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name, return NULL; } + log->flush_callback_fn = flush_callback_fn; log->type = type; if (type->ctr(log, ti, argc, argv)) { kfree(log); @@ -208,7 +210,9 @@ struct log_header { struct log_c { struct dm_target *ti; - int touched; + int touched_dirtied; + int touched_cleaned; + int flush_failed; uint32_t region_size; unsigned int region_count; region_t sync_count; @@ -233,6 +237,7 @@ struct log_c { * Disk log fields */ int log_dev_failed; + int log_dev_flush_failed; struct dm_dev *log_dev; struct log_header header; @@ -253,14 +258,14 @@ static inline void log_set_bit(struct log_c *l, uint32_t *bs, unsigned bit) { ext2_set_bit(bit, (unsigned long *) bs); - l->touched = 1; + l->touched_cleaned = 1; } static inline void log_clear_bit(struct log_c *l, uint32_t *bs, unsigned bit) { ext2_clear_bit(bit, (unsigned long *) bs); - l->touched = 1; + l->touched_dirtied = 1; } /*---------------------------------------------------------------- @@ -287,6 +292,19 @@ static int rw_header(struct log_c *lc, int rw) return dm_io(&lc->io_req, 1, &lc->header_location, NULL); } +static int flush_header(struct log_c *lc) +{ + struct dm_io_region 
null_location = { + .bdev = lc->header_location.bdev, + .sector = 0, + .count = 0, + }; + + lc->io_req.bi_rw = WRITE_BARRIER; + + return dm_io(&lc->io_req, 1, &null_location, NULL); +} + static int read_header(struct log_c *log) { int r; @@ -378,7 +396,9 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, } lc->ti = ti; - lc->touched = 0; + lc->touched_dirtied = 0; + lc->touched_cleaned = 0; + lc->flush_failed = 0; lc->region_size = region_size; lc->region_count = region_count; lc->sync = sync; @@ -406,6 +426,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, } else { lc->log_dev = dev; lc->log_dev_failed = 0; + lc->log_dev_flush_failed = 0; lc->header_location.bdev = lc->log_dev->bdev; lc->header_location.sector = 0; @@ -614,6 +635,11 @@ static int disk_resume(struct dm_dirty_log *log) /* write the new header */ r = rw_header(lc, WRITE); + if (!r) { + r = flush_header(lc); + if (r) + lc->log_dev_flush_failed = 1; + } if (r) { DMWARN("%s: Failed to write header on dirty region log device", lc->log_dev->name); @@ -656,18 +682,40 @@ static int core_flush(struct dm_dirty_log *log) static int disk_flush(struct dm_dirty_log *log) { - int r; - struct log_c *lc = (struct log_c *) log->context; + int r, i; + struct log_c *lc = log->context; /* only write if the log has changed */ - if (!lc->touched) + if (!lc->touched_cleaned && !lc->touched_dirtied) return 0; + if (lc->touched_cleaned && log->flush_callback_fn && + log->flush_callback_fn(lc->ti)) { + /* + * At this point it is impossible to determine which + * regions are clean and which are dirty (without + * re-reading the log off disk). So mark all of them + * dirty. + */ + lc->flush_failed = 1; + for (i = 0; i < lc->region_count; i++) + log_clear_bit(lc, lc->clean_bits, i); + } + r = rw_header(lc, WRITE); if (r) fail_log_device(lc); - else - lc->touched = 0; + else { + if (lc->touched_dirtied) { + r = flush_header(lc); + if (r) { + lc->log_dev_flush_failed = 1; + fail_log_device(lc); + } else + lc->touched_dirtied = 0; + } + lc->touched_cleaned = 0; + } return r; } @@ -681,7 +729,8 @@ static void core_mark_region(struct dm_dirty_log *log, region_t region) static void core_clear_region(struct dm_dirty_log *log, region_t region) { struct log_c *lc = (struct log_c *) log->context; - log_set_bit(lc, lc->clean_bits, region); + if (likely(!lc->flush_failed)) + log_set_bit(lc, lc->clean_bits, region); } static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) @@ -762,7 +811,9 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status, switch(status) { case STATUSTYPE_INFO: DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name, - lc->log_dev_failed ? 'D' : 'A'); + lc->log_dev_flush_failed ? 'F' : + lc->log_dev_failed ? 'D' : + 'A'); break; case STATUSTYPE_TABLE: diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 32d0b878ecc..e81345a1d08 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -93,6 +93,10 @@ struct multipath { * can resubmit bios on error. */ mempool_t *mpio_pool; + + struct mutex work_mutex; + + unsigned suspended; /* Don't create new I/O internally when set. 
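
The disk_flush() rework in this hunk only trusts the in-core clean bits once both the data devices and the log header are known to be on stable storage, and it throws the clean bits away when the data devices cannot be flushed. A simplified userspace model of that ordering follows; the callback fields and the single-word bitmap are invented stand-ins for the dirty-log internals.

#include <stdint.h>

struct dirty_log {
        uint64_t clean_bits;                 /* one bit per region, 1 = clean */
        int touched_dirtied;                 /* some clean bit was cleared    */
        int touched_cleaned;                 /* some clean bit was set        */
        int flush_failed;
        int (*flush_data_devs)(void *ctx);   /* like log->flush_callback_fn   */
        int (*write_header)(void *ctx);      /* like rw_header(lc, WRITE)     */
        int (*flush_log_dev)(void *ctx);     /* like flush_header(lc)         */
        void *ctx;
};

static int log_flush(struct dirty_log *lc)
{
        int r;

        if (!lc->touched_cleaned && !lc->touched_dirtied)
                return 0;                    /* nothing changed since last flush */

        /*
         * Regions were marked clean in core.  If the data devices cannot
         * be flushed we no longer know which regions really are clean,
         * so forget all of the clean bits before writing the header.
         */
        if (lc->touched_cleaned && lc->flush_data_devs &&
            lc->flush_data_devs(lc->ctx)) {
                lc->flush_failed = 1;
                lc->clean_bits = 0;
        }

        r = lc->write_header(lc->ctx);
        if (r)
                return r;                    /* header not on disk: keep flags */

        if (lc->touched_dirtied) {
                r = lc->flush_log_dev(lc->ctx);
                if (r)
                        return r;            /* barrier failed: keep the flag  */
                lc->touched_dirtied = 0;
        }
        lc->touched_cleaned = 0;

        return 0;
}

Note that touched_dirtied is only cleared once the log-device flush succeeds, so a failed flush leaves the header write pending; in the kernel both failure paths additionally call fail_log_device().
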
*/ }; /* @@ -198,6 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) m->queue_io = 1; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); + mutex_init(&m->work_mutex); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); @@ -885,13 +890,18 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, return r; } -static void multipath_dtr(struct dm_target *ti) +static void flush_multipath_work(void) { - struct multipath *m = (struct multipath *) ti->private; - flush_workqueue(kmpath_handlerd); flush_workqueue(kmultipathd); flush_scheduled_work(); +} + +static void multipath_dtr(struct dm_target *ti) +{ + struct multipath *m = ti->private; + + flush_multipath_work(); free_multipath(m); } @@ -1116,8 +1126,9 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) return limit_reached; } -static void pg_init_done(struct dm_path *path, int errors) +static void pg_init_done(void *data, int errors) { + struct dm_path *path = data; struct pgpath *pgpath = path_to_pgpath(path); struct priority_group *pg = pgpath->pg; struct multipath *m = pg->m; @@ -1183,12 +1194,11 @@ static void pg_init_done(struct dm_path *path, int errors) static void activate_path(struct work_struct *work) { - int ret; struct pgpath *pgpath = container_of(work, struct pgpath, activate_path); - ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)); - pg_init_done(&pgpath->path, ret); + scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), + pg_init_done, &pgpath->path); } /* @@ -1261,6 +1271,16 @@ static void multipath_presuspend(struct dm_target *ti) queue_if_no_path(m, 0, 1); } +static void multipath_postsuspend(struct dm_target *ti) +{ + struct multipath *m = ti->private; + + mutex_lock(&m->work_mutex); + m->suspended = 1; + flush_multipath_work(); + mutex_unlock(&m->work_mutex); +} + /* * Restore the queue_if_no_path setting. 
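
multipath_postsuspend() above, together with the resume and message changes that follow, uses work_mutex plus a suspended flag so that table messages cannot kick off new path-group work while the device is suspended. A rough pthread-based analogue of that gate (names are illustrative, not the kernel code):

#include <errno.h>
#include <pthread.h>

struct mpath {
        pthread_mutex_t work_mutex;
        int suspended;                  /* don't create new internal I/O when set */
};

static void mpath_postsuspend(struct mpath *m)
{
        pthread_mutex_lock(&m->work_mutex);
        m->suspended = 1;
        /* ...flush queued work here, as flush_multipath_work() does... */
        pthread_mutex_unlock(&m->work_mutex);
}

static void mpath_resume(struct mpath *m)
{
        pthread_mutex_lock(&m->work_mutex);
        m->suspended = 0;
        pthread_mutex_unlock(&m->work_mutex);
}

/* Message handling bails out early instead of touching the paths. */
static int mpath_message(struct mpath *m, int argc, char **argv)
{
        int r = -EINVAL;

        (void)argc;
        (void)argv;

        pthread_mutex_lock(&m->work_mutex);

        if (m->suspended) {
                r = -EBUSY;
                goto out;
        }

        /* ...parse argv and run the requested action here... */
        r = 0;
out:
        pthread_mutex_unlock(&m->work_mutex);
        return r;
}
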
*/ @@ -1269,6 +1289,10 @@ static void multipath_resume(struct dm_target *ti) struct multipath *m = (struct multipath *) ti->private; unsigned long flags; + mutex_lock(&m->work_mutex); + m->suspended = 0; + mutex_unlock(&m->work_mutex); + spin_lock_irqsave(&m->lock, flags); m->queue_if_no_path = m->saved_queue_if_no_path; spin_unlock_irqrestore(&m->lock, flags); @@ -1397,51 +1421,71 @@ static int multipath_status(struct dm_target *ti, status_type_t type, static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; + int r = -EINVAL; struct dm_dev *dev; struct multipath *m = (struct multipath *) ti->private; action_fn action; + mutex_lock(&m->work_mutex); + + if (m->suspended) { + r = -EBUSY; + goto out; + } + + if (dm_suspended(ti)) { + r = -EBUSY; + goto out; + } + if (argc == 1) { - if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) - return queue_if_no_path(m, 1, 0); - else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) - return queue_if_no_path(m, 0, 0); + if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { + r = queue_if_no_path(m, 1, 0); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { + r = queue_if_no_path(m, 0, 0); + goto out; + } } - if (argc != 2) - goto error; + if (argc != 2) { + DMWARN("Unrecognised multipath message received."); + goto out; + } - if (!strnicmp(argv[0], MESG_STR("disable_group"))) - return bypass_pg_num(m, argv[1], 1); - else if (!strnicmp(argv[0], MESG_STR("enable_group"))) - return bypass_pg_num(m, argv[1], 0); - else if (!strnicmp(argv[0], MESG_STR("switch_group"))) - return switch_pg_num(m, argv[1]); - else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) + if (!strnicmp(argv[0], MESG_STR("disable_group"))) { + r = bypass_pg_num(m, argv[1], 1); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { + r = bypass_pg_num(m, argv[1], 0); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { + r = switch_pg_num(m, argv[1]); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) action = reinstate_path; else if (!strnicmp(argv[0], MESG_STR("fail_path"))) action = fail_path; - else - goto error; + else { + DMWARN("Unrecognised multipath message received."); + goto out; + } r = dm_get_device(ti, argv[1], ti->begin, ti->len, dm_table_get_mode(ti->table), &dev); if (r) { DMWARN("message: error getting device %s", argv[1]); - return -EINVAL; + goto out; } r = action_dev(m, dev, action); dm_put_device(ti, dev); +out: + mutex_unlock(&m->work_mutex); return r; - -error: - DMWARN("Unrecognised multipath message received."); - return -EINVAL; } static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, @@ -1567,13 +1611,14 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, .map_rq = multipath_map, .rq_end_io = multipath_end_io, .presuspend = multipath_presuspend, + .postsuspend = multipath_postsuspend, .resume = multipath_resume, .status = multipath_status, .message = multipath_message, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index cc9dc79b078..ad779bd13ae 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -35,6 +35,7 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); *---------------------------------------------------------------*/ enum dm_raid1_error { DM_RAID1_WRITE_ERROR, + 
DM_RAID1_FLUSH_ERROR, DM_RAID1_SYNC_ERROR, DM_RAID1_READ_ERROR }; @@ -57,6 +58,7 @@ struct mirror_set { struct bio_list reads; struct bio_list writes; struct bio_list failures; + struct bio_list holds; /* bios are waiting until suspend */ struct dm_region_hash *rh; struct dm_kcopyd_client *kcopyd_client; @@ -67,6 +69,7 @@ struct mirror_set { region_t nr_regions; int in_sync; int log_failure; + int leg_failure; atomic_t suspend; atomic_t default_mirror; /* Default mirror */ @@ -179,6 +182,17 @@ static void set_default_mirror(struct mirror *m) atomic_set(&ms->default_mirror, m - m0); } +static struct mirror *get_valid_mirror(struct mirror_set *ms) +{ + struct mirror *m; + + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) + if (!atomic_read(&m->error_count)) + return m; + + return NULL; +} + /* fail_mirror * @m: mirror device to fail * @error_type: one of the enum's, DM_RAID1_*_ERROR @@ -198,6 +212,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) struct mirror_set *ms = m->ms; struct mirror *new; + ms->leg_failure = 1; + /* * error_count is used for nothing more than a * simple way to tell if a device has encountered @@ -224,19 +240,50 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) goto out; } - for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) - if (!atomic_read(&new->error_count)) { - set_default_mirror(new); - break; - } - - if (unlikely(new == ms->mirror + ms->nr_mirrors)) + new = get_valid_mirror(ms); + if (new) + set_default_mirror(new); + else DMWARN("All sides of mirror have failed."); out: schedule_work(&ms->trigger_event); } +static int mirror_flush(struct dm_target *ti) +{ + struct mirror_set *ms = ti->private; + unsigned long error_bits; + + unsigned int i; + struct dm_io_region io[ms->nr_mirrors]; + struct mirror *m; + struct dm_io_request io_req = { + .bi_rw = WRITE_BARRIER, + .mem.type = DM_IO_KMEM, + .mem.ptr.bvec = NULL, + .client = ms->io_client, + }; + + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) { + io[i].bdev = m->dev->bdev; + io[i].sector = 0; + io[i].count = 0; + } + + error_bits = -1; + dm_io(&io_req, ms->nr_mirrors, io, &error_bits); + if (unlikely(error_bits != 0)) { + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error_bits)) + fail_mirror(ms->mirror + i, + DM_RAID1_FLUSH_ERROR); + return -EIO; + } + + return 0; +} + /*----------------------------------------------------------------- * Recovery. * @@ -396,6 +443,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) */ static sector_t map_sector(struct mirror *m, struct bio *bio) { + if (unlikely(!bio->bi_size)) + return 0; return m->offset + (bio->bi_sector - m->ms->ti->begin); } @@ -413,6 +462,27 @@ static void map_region(struct dm_io_region *io, struct mirror *m, io->count = bio->bi_size >> 9; } +static void hold_bio(struct mirror_set *ms, struct bio *bio) +{ + /* + * If device is suspended, complete the bio. + */ + if (atomic_read(&ms->suspend)) { + if (dm_noflush_suspending(ms->ti)) + bio_endio(bio, DM_ENDIO_REQUEUE); + else + bio_endio(bio, -EIO); + return; + } + + /* + * Hold bio until the suspend is complete. 
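
mirror_flush() in this hunk issues one zero-length barrier covering every leg and then walks the error bitmask handed back, failing only the legs whose flush failed. The per-bit bookkeeping, reduced to plain userspace C; submit_flush() is a stub standing in for the dm_io() call with WRITE_BARRIER and a zero-length region.

#include <errno.h>

struct leg {
        int failed;                     /* leg has seen an error */
};

/*
 * Stub for the real zero-length barrier issued through dm_io():
 * returns a bitmask with bit i set when the flush of leg i failed.
 */
static unsigned long submit_flush(struct leg *legs, unsigned nr_legs)
{
        (void)legs;
        (void)nr_legs;
        return 0;                       /* pretend every flush succeeded */
}

static int mirror_flush(struct leg *legs, unsigned nr_legs)
{
        unsigned long error_bits;
        unsigned i;

        error_bits = submit_flush(legs, nr_legs);
        if (!error_bits)
                return 0;

        /* Fail only the legs whose flush failed (DM_RAID1_FLUSH_ERROR). */
        for (i = 0; i < nr_legs; i++)
                if (error_bits & (1UL << i))
                        legs[i].failed = 1;

        return -EIO;
}
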
+ */ + spin_lock_irq(&ms->lock); + bio_list_add(&ms->holds, bio); + spin_unlock_irq(&ms->lock); +} + /*----------------------------------------------------------------- * Reads *---------------------------------------------------------------*/ @@ -511,7 +581,6 @@ static void write_callback(unsigned long error, void *context) unsigned i, ret = 0; struct bio *bio = (struct bio *) context; struct mirror_set *ms; - int uptodate = 0; int should_wake = 0; unsigned long flags; @@ -524,36 +593,27 @@ static void write_callback(unsigned long error, void *context) * This way we handle both writes to SYNC and NOSYNC * regions with the same code. */ - if (likely(!error)) - goto out; + if (likely(!error)) { + bio_endio(bio, ret); + return; + } for (i = 0; i < ms->nr_mirrors; i++) if (test_bit(i, &error)) fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); - else - uptodate = 1; - if (unlikely(!uptodate)) { - DMERR("All replicated volumes dead, failing I/O"); - /* None of the writes succeeded, fail the I/O. */ - ret = -EIO; - } else if (errors_handled(ms)) { - /* - * Need to raise event. Since raising - * events can block, we need to do it in - * the main thread. - */ - spin_lock_irqsave(&ms->lock, flags); - if (!ms->failures.head) - should_wake = 1; - bio_list_add(&ms->failures, bio); - spin_unlock_irqrestore(&ms->lock, flags); - if (should_wake) - wakeup_mirrord(ms); - return; - } -out: - bio_endio(bio, ret); + /* + * Need to raise event. Since raising + * events can block, we need to do it in + * the main thread. + */ + spin_lock_irqsave(&ms->lock, flags); + if (!ms->failures.head) + should_wake = 1; + bio_list_add(&ms->failures, bio); + spin_unlock_irqrestore(&ms->lock, flags); + if (should_wake) + wakeup_mirrord(ms); } static void do_write(struct mirror_set *ms, struct bio *bio) @@ -562,7 +622,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_region io[ms->nr_mirrors], *dest = io; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE, + .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER), .mem.type = DM_IO_BVEC, .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, .notify.fn = write_callback, @@ -603,6 +663,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { + if (unlikely(bio_empty_barrier(bio))) { + bio_list_add(&sync, bio); + continue; + } + region = dm_rh_bio_to_region(ms->rh, bio); if (log->type->is_remote_recovering && @@ -672,8 +737,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) dm_rh_delay(ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { - map_bio(get_default_mirror(ms), bio); - generic_make_request(bio); + if (unlikely(ms->leg_failure) && errors_handled(ms)) + hold_bio(ms, bio); + else { + map_bio(get_default_mirror(ms), bio); + generic_make_request(bio); + } } } @@ -681,20 +750,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) { struct bio *bio; - if (!failures->head) - return; - - if (!ms->log_failure) { - while ((bio = bio_list_pop(failures))) { - ms->in_sync = 0; - dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); - } + if (likely(!failures->head)) return; - } /* * If the log has failed, unattempted writes are being - * put on the failures list. We can't issue those writes + * put on the holds list. We can't issue those writes * until a log has been marked, so we must store them. 
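
write_callback() above runs from the I/O completion path, where raising a dm event could block, so on failure it only queues the bio on ms->failures and wakes the mirror daemon, which does the slow work later. The hand-off pattern in userspace terms, with a simple list and condition variable standing in for the bio list and workqueue; all names here are illustrative.

#include <pthread.h>

struct item {
        struct item *next;
};

static struct item *failures;           /* like ms->failures        */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;

/* Completion callback: may not block, so just queue and wake. */
static void write_callback_failed(struct item *it)
{
        int should_wake = 0;

        pthread_mutex_lock(&lock);
        if (!failures)
                should_wake = 1;        /* first failure: daemon may be idle */
        it->next = failures;
        failures = it;
        pthread_mutex_unlock(&lock);

        if (should_wake)
                pthread_cond_signal(&kick);     /* wakeup_mirrord() */
}

/* Daemon context: safe to block, raise events, hold or retry the I/O. */
static struct item *daemon_grab_failures(void)
{
        struct item *list;

        pthread_mutex_lock(&lock);
        while (!failures)
                pthread_cond_wait(&kick, &lock);
        list = failures;
        failures = NULL;
        pthread_mutex_unlock(&lock);

        return list;
}
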
* * If a 'noflush' suspend is in progress, we can requeue @@ -709,23 +770,27 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) * for us to treat them the same and requeue them * as well. */ - if (dm_noflush_suspending(ms->ti)) { - while ((bio = bio_list_pop(failures))) - bio_endio(bio, DM_ENDIO_REQUEUE); - return; - } + while ((bio = bio_list_pop(failures))) { + if (!ms->log_failure) { + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio); + } - if (atomic_read(&ms->suspend)) { - while ((bio = bio_list_pop(failures))) + /* + * If all the legs are dead, fail the I/O. + * If we have been told to handle errors, hold the bio + * and wait for userspace to deal with the problem. + * Otherwise pretend that the I/O succeeded. (This would + * be wrong if the failed leg returned after reboot and + * got replicated back to the good legs.) + */ + if (!get_valid_mirror(ms)) bio_endio(bio, -EIO); - return; + else if (errors_handled(ms)) + hold_bio(ms, bio); + else + bio_endio(bio, 0); } - - spin_lock_irq(&ms->lock); - bio_list_merge(&ms->failures, failures); - spin_unlock_irq(&ms->lock); - - delayed_wake(ms); } static void trigger_event(struct work_struct *work) @@ -784,12 +849,17 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, } spin_lock_init(&ms->lock); + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + bio_list_init(&ms->failures); + bio_list_init(&ms->holds); ms->ti = ti; ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; ms->log_failure = 0; + ms->leg_failure = 0; atomic_set(&ms->suspend, 0); atomic_set(&ms->default_mirror, DEFAULT_MIRROR); @@ -889,7 +959,8 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, return NULL; } - dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2); + dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count, + argv + 2); if (!dl) { ti->error = "Error creating mirror dirty log"; return NULL; @@ -995,6 +1066,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = ms; ti->split_io = dm_rh_get_region_size(ms->rh); + ti->num_flush_requests = 1; ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); if (!ms->kmirrord_wq) { @@ -1122,7 +1194,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - dm_rh_dec(ms->rh, map_context->ll); + if (likely(!bio_empty_barrier(bio))) + dm_rh_dec(ms->rh, map_context->ll); return error; } @@ -1180,6 +1253,9 @@ static void mirror_presuspend(struct dm_target *ti) struct mirror_set *ms = (struct mirror_set *) ti->private; struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + struct bio_list holds; + struct bio *bio; + atomic_set(&ms->suspend, 1); /* @@ -1202,6 +1278,22 @@ static void mirror_presuspend(struct dm_target *ti) * we know that all of our I/O has been pushed. */ flush_workqueue(ms->kmirrord_wq); + + /* + * Now set ms->suspend is set and the workqueue flushed, no more + * entries can be added to ms->hold list, so process it. + * + * Bios can still arrive concurrently with or after this + * presuspend function, but they cannot join the hold list + * because ms->suspend is set. 
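
The reworked do_failures() settles every failed write in one of three ways, as the comment above describes. The decision itself is tiny; here is a hedged userspace rendering in which endio() and hold_bio() are placeholders and the two flags model get_valid_mirror() and errors_handled().

#include <errno.h>
#include <stdio.h>

struct bio;                             /* opaque in this sketch */

static int any_valid_leg;               /* models get_valid_mirror() != NULL */
static int errors_handled;              /* models errors_handled(ms)         */

static void endio(struct bio *bio, int error)
{
        (void)bio;
        printf("completed with %d\n", error);
}

static void hold_bio(struct bio *bio)
{
        (void)bio;
        printf("held until suspend or userspace intervention\n");
}

static void settle_failed_write(struct bio *bio)
{
        if (!any_valid_leg) {
                /* every leg is dead: fail the I/O */
                endio(bio, -EIO);
        } else if (errors_handled) {
                /* hold it and let userspace deal with the problem */
                hold_bio(bio);
        } else {
                /* no handler: pretend the write succeeded */
                endio(bio, 0);
        }
}

int main(void)
{
        any_valid_leg = 1;
        errors_handled = 0;
        settle_failed_write(NULL);      /* prints "completed with 0" */
        return 0;
}
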
+ */ + spin_lock_irq(&ms->lock); + holds = ms->holds; + bio_list_init(&ms->holds); + spin_unlock_irq(&ms->lock); + + while ((bio = bio_list_pop(&holds))) + hold_bio(ms, bio); } static void mirror_postsuspend(struct dm_target *ti) @@ -1244,7 +1336,8 @@ static char device_status_char(struct mirror *m) if (!atomic_read(&(m->error_count))) return 'A'; - return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : + (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 36dbe29f2fd..5f19ceb6fe9 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -79,6 +79,11 @@ struct dm_region_hash { struct list_head recovered_regions; struct list_head failed_recovered_regions; + /* + * If there was a barrier failure no regions can be marked clean. + */ + int barrier_failure; + void *context; sector_t target_begin; @@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create( INIT_LIST_HEAD(&rh->quiesced_regions); INIT_LIST_HEAD(&rh->recovered_regions); INIT_LIST_HEAD(&rh->failed_recovered_regions); + rh->barrier_failure = 0; rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, sizeof(struct dm_region)); @@ -377,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success) /* dm_rh_mark_nosync * @ms * @bio - * @done - * @error * * The bio was written on some mirror(s) but failed on other mirror(s). * We can successfully endio the bio but should avoid the region being @@ -386,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success) * * This function is _not_ safe in interrupt context! */ -void dm_rh_mark_nosync(struct dm_region_hash *rh, - struct bio *bio, unsigned done, int error) +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) { unsigned long flags; struct dm_dirty_log *log = rh->log; @@ -395,6 +398,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, region_t region = dm_rh_bio_to_region(rh, bio); int recovering = 0; + if (bio_empty_barrier(bio)) { + rh->barrier_failure = 1; + return; + } + /* We must inform the log that the sync count has changed. */ log->type->set_region_sync(log, region, 0); @@ -419,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, BUG_ON(!list_empty(®->list)); spin_unlock_irqrestore(&rh->region_lock, flags); - bio_endio(bio, error); if (recovering) complete_resync_work(reg, 0); } @@ -515,8 +522,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) { struct bio *bio; - for (bio = bios->head; bio; bio = bio->bi_next) + for (bio = bios->head; bio; bio = bio->bi_next) { + if (bio_empty_barrier(bio)) + continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); + } } EXPORT_SYMBOL_GPL(dm_rh_inc_pending); @@ -544,7 +554,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region) */ /* do nothing for DM_RH_NOSYNC */ - if (reg->state == DM_RH_RECOVERING) { + if (unlikely(rh->barrier_failure)) { + /* + * If a write barrier failed some time ago, we + * don't know whether or not this write made it + * to the disk, so we must resync the device. 
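
dm_rh_dec() in this hunk refuses to move a region from DIRTY back to CLEAN once rh->barrier_failure is set, because a lost flush means the write may never have reached the platter. The state transition modelled on a plain struct; the enum values and function name are illustrative.

enum region_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

struct region_hash {
        int barrier_failure;            /* a flush failed at some point */
};

struct region {
        enum region_state state;
};

/* Called when the last pending write on 'reg' completes. */
static void region_write_done(struct region_hash *rh, struct region *reg)
{
        if (rh->barrier_failure) {
                /*
                 * We cannot tell whether this write reached the platter,
                 * so the region must be resynced.
                 */
                reg->state = RH_NOSYNC;
        } else if (reg->state == RH_RECOVERING) {
                /* recovery may proceed (the kernel queues it for kcopyd) */
        } else if (reg->state == RH_DIRTY) {
                reg->state = RH_CLEAN;  /* safe to clear the dirty-log bit */
        }
}
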
+ */ + reg->state = DM_RH_NOSYNC; + } else if (reg->state == DM_RH_RECOVERING) { list_add_tail(®->list, &rh->quiesced_regions); } else if (reg->state == DM_RH_DIRTY) { reg->state = DM_RH_CLEAN; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 0c746420c00..7d08879689a 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -55,6 +55,8 @@ */ #define SNAPSHOT_DISK_VERSION 1 +#define NUM_SNAPSHOT_HDR_CHUNKS 1 + struct disk_header { uint32_t magic; @@ -120,7 +122,22 @@ struct pstore { /* * The next free chunk for an exception. + * + * When creating exceptions, all the chunks here and above are + * free. It holds the next chunk to be allocated. On rare + * occasions (e.g. after a system crash) holes can be left in + * the exception store because chunks can be committed out of + * order. + * + * When merging exceptions, it does not necessarily mean all the + * chunks here and above are free. It holds the value it would + * have held if all chunks had been committed in order of + * allocation. Consequently the value may occasionally be + * slightly too low, but since it's only used for 'status' and + * it can never reach its minimum value too early this doesn't + * matter. */ + chunk_t next_free; /* @@ -214,7 +231,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, int metadata) { struct dm_io_region where = { - .bdev = ps->store->cow->bdev, + .bdev = dm_snap_cow(ps->store->snap)->bdev, .sector = ps->store->chunk_size * chunk, .count = ps->store->chunk_size, }; @@ -294,7 +311,8 @@ static int read_header(struct pstore *ps, int *new_snapshot) */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, - bdev_logical_block_size(ps->store->cow->bdev) >> 9); + bdev_logical_block_size(dm_snap_cow(ps->store->snap)-> + bdev) >> 9); ps->store->chunk_mask = ps->store->chunk_size - 1; ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; chunk_size_supplied = 0; @@ -408,6 +426,15 @@ static void write_exception(struct pstore *ps, e->new_chunk = cpu_to_le64(de->new_chunk); } +static void clear_exception(struct pstore *ps, uint32_t index) +{ + struct disk_exception *e = get_exception(ps, index); + + /* clear it */ + e->old_chunk = 0; + e->new_chunk = 0; +} + /* * Registers the exceptions that are present in the current area. * 'full' is filled in to indicate if the area has been @@ -489,11 +516,23 @@ static struct pstore *get_info(struct dm_exception_store *store) return (struct pstore *) store->context; } -static void persistent_fraction_full(struct dm_exception_store *store, - sector_t *numerator, sector_t *denominator) +static void persistent_usage(struct dm_exception_store *store, + sector_t *total_sectors, + sector_t *sectors_allocated, + sector_t *metadata_sectors) { - *numerator = get_info(store)->next_free * store->chunk_size; - *denominator = get_dev_size(store->cow->bdev); + struct pstore *ps = get_info(store); + + *sectors_allocated = ps->next_free * store->chunk_size; + *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); + + /* + * First chunk is the fixed header. + * Then there are (ps->current_area + 1) metadata chunks, each one + * separated from the next by ps->exceptions_per_area data chunks. 
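
persistent_usage() just below turns the store layout (one fixed header chunk plus current_area + 1 metadata chunks, each separated by exceptions_per_area data chunks) into the three figures reported by snapshot status. The arithmetic in isolation, with sector_t simplified to uint64_t and the example numbers made up:

#include <stdint.h>
#include <stdio.h>

/* One header chunk precedes everything else (NUM_SNAPSHOT_HDR_CHUNKS). */
#define HDR_CHUNKS 1ULL

static void snapshot_usage(uint64_t next_free, uint64_t current_area,
                           uint64_t chunk_size, uint64_t cow_dev_sectors,
                           uint64_t *total, uint64_t *allocated,
                           uint64_t *metadata)
{
        *total     = cow_dev_sectors;
        *allocated = next_free * chunk_size;
        /* header chunk plus one metadata chunk per area in use */
        *metadata  = (current_area + 1 + HDR_CHUNKS) * chunk_size;
}

int main(void)
{
        uint64_t total, allocated, metadata;

        /* made-up example: 8-sector chunks, 2nd area in use, 300 chunks taken */
        snapshot_usage(300, 1, 8, 1ULL << 20, &total, &allocated, &metadata);

        printf("%llu/%llu sectors allocated, %llu metadata sectors\n",
               (unsigned long long)allocated,
               (unsigned long long)total,
               (unsigned long long)metadata);   /* 2400/1048576, 24 */
        return 0;
}
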
+ */ + *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * + store->chunk_size; } static void persistent_dtr(struct dm_exception_store *store) @@ -552,44 +591,40 @@ static int persistent_read_metadata(struct dm_exception_store *store, ps->current_area = 0; zero_memory_area(ps); r = zero_disk_area(ps, 0); - if (r) { + if (r) DMWARN("zero_disk_area(0) failed"); - return r; - } - } else { - /* - * Sanity checks. - */ - if (ps->version != SNAPSHOT_DISK_VERSION) { - DMWARN("unable to handle snapshot disk version %d", - ps->version); - return -EINVAL; - } + return r; + } + /* + * Sanity checks. + */ + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + return -EINVAL; + } - /* - * Metadata are valid, but snapshot is invalidated - */ - if (!ps->valid) - return 1; + /* + * Metadata are valid, but snapshot is invalidated + */ + if (!ps->valid) + return 1; - /* - * Read the metadata. - */ - r = read_exceptions(ps, callback, callback_context); - if (r) - return r; - } + /* + * Read the metadata. + */ + r = read_exceptions(ps, callback, callback_context); - return 0; + return r; } static int persistent_prepare_exception(struct dm_exception_store *store, - struct dm_snap_exception *e) + struct dm_exception *e) { struct pstore *ps = get_info(store); uint32_t stride; chunk_t next_free; - sector_t size = get_dev_size(store->cow->bdev); + sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? */ if (size < ((ps->next_free + 1) * store->chunk_size)) @@ -611,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, } static void persistent_commit_exception(struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context) { @@ -672,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store, ps->callback_count = 0; } +static int persistent_prepare_merge(struct dm_exception_store *store, + chunk_t *last_old_chunk, + chunk_t *last_new_chunk) +{ + struct pstore *ps = get_info(store); + struct disk_exception de; + int nr_consecutive; + int r; + + /* + * When current area is empty, move back to preceding area. + */ + if (!ps->current_committed) { + /* + * Have we finished? + */ + if (!ps->current_area) + return 0; + + ps->current_area--; + r = area_io(ps, READ); + if (r < 0) + return r; + ps->current_committed = ps->exceptions_per_area; + } + + read_exception(ps, ps->current_committed - 1, &de); + *last_old_chunk = de.old_chunk; + *last_new_chunk = de.new_chunk; + + /* + * Find number of consecutive chunks within the current area, + * working backwards. 
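
persistent_prepare_merge() reads the most recently committed exception and then, as the loop that follows shows, counts how many immediately preceding entries continue the same run of old and new chunks, so one large copy can merge them all. The backwards scan over an in-memory array, for illustration only:

#include <stdint.h>
#include <stdio.h>

struct disk_exception {
        uint64_t old_chunk, new_chunk;
};

/*
 * Returns how many exceptions, ending at index 'last', form one
 * consecutive run in both old-chunk and new-chunk space.
 */
static int count_consecutive(const struct disk_exception *e, int last)
{
        int n;

        for (n = 1; n <= last; n++)
                if (e[last - n].old_chunk != e[last].old_chunk - n ||
                    e[last - n].new_chunk != e[last].new_chunk - n)
                        break;

        return n;
}

int main(void)
{
        struct disk_exception area[] = {
                { 10, 100 }, { 20, 200 }, { 21, 201 }, { 22, 202 },
        };

        /* last committed entry is index 3; entries 1..3 are consecutive */
        printf("%d consecutive\n", count_consecutive(area, 3));   /* -> 3 */
        return 0;
}
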
+ */ + for (nr_consecutive = 1; nr_consecutive < ps->current_committed; + nr_consecutive++) { + read_exception(ps, ps->current_committed - 1 - nr_consecutive, + &de); + if (de.old_chunk != *last_old_chunk - nr_consecutive || + de.new_chunk != *last_new_chunk - nr_consecutive) + break; + } + + return nr_consecutive; +} + +static int persistent_commit_merge(struct dm_exception_store *store, + int nr_merged) +{ + int r, i; + struct pstore *ps = get_info(store); + + BUG_ON(nr_merged > ps->current_committed); + + for (i = 0; i < nr_merged; i++) + clear_exception(ps, ps->current_committed - 1 - i); + + r = area_io(ps, WRITE); + if (r < 0) + return r; + + ps->current_committed -= nr_merged; + + /* + * At this stage, only persistent_usage() uses ps->next_free, so + * we make no attempt to keep ps->next_free strictly accurate + * as exceptions may have been committed out-of-order originally. + * Once a snapshot has become merging, we set it to the value it + * would have held had all the exceptions been committed in order. + * + * ps->current_area does not get reduced by prepare_merge() until + * after commit_merge() has removed the nr_merged previous exceptions. + */ + ps->next_free = (area_location(ps, ps->current_area) - 1) + + (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS; + + return 0; +} + static void persistent_drop_snapshot(struct dm_exception_store *store) { struct pstore *ps = get_info(store); @@ -697,7 +811,7 @@ static int persistent_ctr(struct dm_exception_store *store, ps->area = NULL; ps->zero_area = NULL; ps->header_area = NULL; - ps->next_free = 2; /* skipping the header and first area */ + ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */ ps->current_committed = 0; ps->callback_count = 0; @@ -726,8 +840,7 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" %s P %llu", store->cow->name, - (unsigned long long)store->chunk_size); + DMEMIT(" P %llu", (unsigned long long)store->chunk_size); } return sz; @@ -741,8 +854,10 @@ static struct dm_exception_store_type _persistent_type = { .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, + .prepare_merge = persistent_prepare_merge, + .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, - .fraction_full = persistent_fraction_full, + .usage = persistent_usage, .status = persistent_status, }; @@ -754,8 +869,10 @@ static struct dm_exception_store_type _persistent_compat_type = { .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, + .prepare_merge = persistent_prepare_merge, + .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, - .fraction_full = persistent_fraction_full, + .usage = persistent_usage, .status = persistent_status, }; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index cde5aa558e6..a0898a66a2f 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -36,10 +36,10 @@ static int transient_read_metadata(struct dm_exception_store *store, } static int transient_prepare_exception(struct dm_exception_store *store, - struct dm_snap_exception *e) + struct dm_exception *e) { struct transient_c *tc = store->context; - sector_t size = get_dev_size(store->cow->bdev); + sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); if (size 
< (tc->next_free + store->chunk_size)) return -1; @@ -51,7 +51,7 @@ static int transient_prepare_exception(struct dm_exception_store *store, } static void transient_commit_exception(struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context) { @@ -59,11 +59,14 @@ static void transient_commit_exception(struct dm_exception_store *store, callback(callback_context, 1); } -static void transient_fraction_full(struct dm_exception_store *store, - sector_t *numerator, sector_t *denominator) +static void transient_usage(struct dm_exception_store *store, + sector_t *total_sectors, + sector_t *sectors_allocated, + sector_t *metadata_sectors) { - *numerator = ((struct transient_c *) store->context)->next_free; - *denominator = get_dev_size(store->cow->bdev); + *sectors_allocated = ((struct transient_c *) store->context)->next_free; + *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); + *metadata_sectors = 0; } static int transient_ctr(struct dm_exception_store *store, @@ -91,8 +94,7 @@ static unsigned transient_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" %s N %llu", store->cow->name, - (unsigned long long)store->chunk_size); + DMEMIT(" N %llu", (unsigned long long)store->chunk_size); } return sz; @@ -106,7 +108,7 @@ static struct dm_exception_store_type _transient_type = { .read_metadata = transient_read_metadata, .prepare_exception = transient_prepare_exception, .commit_exception = transient_commit_exception, - .fraction_full = transient_fraction_full, + .usage = transient_usage, .status = transient_status, }; @@ -118,7 +120,7 @@ static struct dm_exception_store_type _transient_compat_type = { .read_metadata = transient_read_metadata, .prepare_exception = transient_prepare_exception, .commit_exception = transient_commit_exception, - .fraction_full = transient_fraction_full, + .usage = transient_usage, .status = transient_status, }; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 3a3ba46e6d4..ee8eb283650 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -25,6 +25,11 @@ #define DM_MSG_PREFIX "snapshots" +static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; + +#define dm_target_is_snapshot_merge(ti) \ + ((ti)->type->name == dm_snapshot_merge_target_name) + /* * The percentage increment we will wake up users at */ @@ -49,7 +54,7 @@ #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) -struct exception_table { +struct dm_exception_table { uint32_t hash_mask; unsigned hash_shift; struct list_head *table; @@ -59,22 +64,31 @@ struct dm_snapshot { struct rw_semaphore lock; struct dm_dev *origin; + struct dm_dev *cow; + + struct dm_target *ti; /* List of snapshots per Origin */ struct list_head list; - /* You can't use a snapshot if this is 0 (e.g. if full) */ + /* + * You can't use a snapshot if this is 0 (e.g. if full). + * A snapshot-merge target never clears this. 
+ */ int valid; /* Origin writes don't trigger exceptions until this is set */ int active; + /* Whether or not owning mapped_device is suspended */ + int suspended; + mempool_t *pending_pool; atomic_t pending_exceptions_count; - struct exception_table pending; - struct exception_table complete; + struct dm_exception_table pending; + struct dm_exception_table complete; /* * pe_lock protects all pending_exception operations and access @@ -95,8 +109,51 @@ struct dm_snapshot { mempool_t *tracked_chunk_pool; spinlock_t tracked_chunk_lock; struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; + + /* + * The merge operation failed if this flag is set. + * Failure modes are handled as follows: + * - I/O error reading the header + * => don't load the target; abort. + * - Header does not have "valid" flag set + * => use the origin; forget about the snapshot. + * - I/O error when reading exceptions + * => don't load the target; abort. + * (We can't use the intermediate origin state.) + * - I/O error while merging + * => stop merging; set merge_failed; process I/O normally. + */ + int merge_failed; + + /* Wait for events based on state_bits */ + unsigned long state_bits; + + /* Range of chunks currently being merged. */ + chunk_t first_merging_chunk; + int num_merging_chunks; + + /* + * Incoming bios that overlap with chunks being merged must wait + * for them to be committed. + */ + struct bio_list bios_queued_during_merge; }; +/* + * state_bits: + * RUNNING_MERGE - Merge operation is in progress. + * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; + * cleared afterwards. + */ +#define RUNNING_MERGE 0 +#define SHUTDOWN_MERGE 1 + +struct dm_dev *dm_snap_cow(struct dm_snapshot *s) +{ + return s->cow; +} +EXPORT_SYMBOL(dm_snap_cow); + static struct workqueue_struct *ksnapd; static void flush_queued_bios(struct work_struct *work); @@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs) } struct dm_snap_pending_exception { - struct dm_snap_exception e; + struct dm_exception e; /* * Origin buffers waiting for this to complete are held @@ -125,28 +182,6 @@ struct dm_snap_pending_exception { struct bio_list origin_bios; struct bio_list snapshot_bios; - /* - * Short-term queue of pending exceptions prior to submission. - */ - struct list_head list; - - /* - * The primary pending_exception is the one that holds - * the ref_count and the list of origin_bios for a - * group of pending_exceptions. It is always last to get freed. - * These fields get set up when writing to the origin. - */ - struct dm_snap_pending_exception *primary_pe; - - /* - * Number of pending_exceptions processing this chunk. - * When this drops to zero we must complete the origin bios. - * If incrementing or decrementing this, hold pe->snap->lock for - * the sibling concerned and not pe->primary_pe->snap->lock unless - * they are the same. - */ - atomic_t ref_count; - /* Pointer back to snapshot context */ struct dm_snapshot *snap; @@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) } /* + * This conflicting I/O is extremely improbable in the caller, + * so msleep(1) is sufficient and there is no need for a wait queue. 
+ */ +static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) +{ + while (__chunk_is_tracked(s, chunk)) + msleep(1); +} + +/* * One of these per registered origin, held in the snapshot_origins hash */ struct origin { @@ -243,6 +288,10 @@ struct origin { static struct list_head *_origins; static struct rw_semaphore _origins_lock; +static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); +static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); +static uint64_t _pending_exceptions_done_count; + static int init_origin_hash(void) { int i; @@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o) } /* + * _origins_lock must be held when calling this function. + * Returns number of snapshots registered using the supplied cow device, plus: + * snap_src - a snapshot suitable for use as a source of exception handover + * snap_dest - a snapshot capable of receiving exception handover. + * snap_merge - an existing snapshot-merge target linked to the same origin. + * There can be at most one snapshot-merge target. The parameter is optional. + * + * Possible return values and states of snap_src and snap_dest. + * 0: NULL, NULL - first new snapshot + * 1: snap_src, NULL - normal snapshot + * 2: snap_src, snap_dest - waiting for handover + * 2: snap_src, NULL - handed over, waiting for old to be deleted + * 1: NULL, snap_dest - source got destroyed without handover + */ +static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, + struct dm_snapshot **snap_src, + struct dm_snapshot **snap_dest, + struct dm_snapshot **snap_merge) +{ + struct dm_snapshot *s; + struct origin *o; + int count = 0; + int active; + + o = __lookup_origin(snap->origin->bdev); + if (!o) + goto out; + + list_for_each_entry(s, &o->snapshots, list) { + if (dm_target_is_snapshot_merge(s->ti) && snap_merge) + *snap_merge = s; + if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) + continue; + + down_read(&s->lock); + active = s->active; + up_read(&s->lock); + + if (active) { + if (snap_src) + *snap_src = s; + } else if (snap_dest) + *snap_dest = s; + + count++; + } + +out: + return count; +} + +/* + * On success, returns 1 if this snapshot is a handover destination, + * otherwise returns 0. + */ +static int __validate_exception_handover(struct dm_snapshot *snap) +{ + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + struct dm_snapshot *snap_merge = NULL; + + /* Does snapshot need exceptions handed over to it? */ + if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, + &snap_merge) == 2) || + snap_dest) { + snap->ti->error = "Snapshot cow pairing for exception " + "table handover failed"; + return -EINVAL; + } + + /* + * If no snap_src was found, snap cannot become a handover + * destination. + */ + if (!snap_src) + return 0; + + /* + * Non-snapshot-merge handover? + */ + if (!dm_target_is_snapshot_merge(snap->ti)) + return 1; + + /* + * Do not allow more than one merging snapshot. 
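
The handover checks above reduce to a small decision over the snap_src, snap_dest and snap_merge results of __find_snapshots_sharing_cow(). A condensed userspace rendering of that decision follows; the struct is a stand-in and the exception-store capability test on prepare_merge/commit_merge is omitted.

#include <errno.h>

struct snap {
        int is_merge;                   /* this target is "snapshot-merge" */
};

/*
 * Returns 1 if 'snap' should receive an exception-table handover,
 * 0 for an ordinary snapshot, or -EINVAL on an invalid combination.
 */
static int validate_handover(const struct snap *snap,
                             const struct snap *snap_src,   /* active snapshot on this cow   */
                             const struct snap *snap_dest,  /* inactive snapshot on this cow */
                             const struct snap *snap_merge, /* existing merge target         */
                             int sharing_count)
{
        /* Someone is already waiting for (or lost) a handover. */
        if (sharing_count == 2 || snap_dest)
                return -EINVAL;

        if (!snap_src)
                return 0;               /* first snapshot of this cow device */

        if (!snap->is_merge)
                return 1;               /* plain snapshot: take over the table */

        if (snap_merge)
                return -EINVAL;         /* at most one merging snapshot */

        return 1;                       /* snapshot-merge takes over the table */
}
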
+ */ + if (snap_merge) { + snap->ti->error = "A snapshot is already merging."; + return -EINVAL; + } + + if (!snap_src->store->type->prepare_merge || + !snap_src->store->type->commit_merge) { + snap->ti->error = "Snapshot exception store does not " + "support snapshot-merge."; + return -EINVAL; + } + + return 1; +} + +static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) +{ + struct dm_snapshot *l; + + /* Sort the list according to chunk size, largest-first smallest-last */ + list_for_each_entry(l, &o->snapshots, list) + if (l->store->chunk_size < s->store->chunk_size) + break; + list_add_tail(&s->list, &l->list); +} + +/* * Make a note of the snapshot and its origin so we can look it * up when the origin has a write on it. + * + * Also validate snapshot exception store handovers. + * On success, returns 1 if this registration is a handover destination, + * otherwise returns 0. */ static int register_snapshot(struct dm_snapshot *snap) { - struct dm_snapshot *l; - struct origin *o, *new_o; + struct origin *o, *new_o = NULL; struct block_device *bdev = snap->origin->bdev; + int r = 0; new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); if (!new_o) return -ENOMEM; down_write(&_origins_lock); - o = __lookup_origin(bdev); + r = __validate_exception_handover(snap); + if (r < 0) { + kfree(new_o); + goto out; + } + + o = __lookup_origin(bdev); if (o) kfree(new_o); else { @@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap) __insert_origin(o); } - /* Sort the list according to chunk size, largest-first smallest-last */ - list_for_each_entry(l, &o->snapshots, list) - if (l->store->chunk_size < snap->store->chunk_size) - break; - list_add_tail(&snap->list, &l->list); + __insert_snapshot(o, snap); + +out: + up_write(&_origins_lock); + + return r; +} + +/* + * Move snapshot to correct place in list according to chunk size. + */ +static void reregister_snapshot(struct dm_snapshot *s) +{ + struct block_device *bdev = s->origin->bdev; + + down_write(&_origins_lock); + + list_del(&s->list); + __insert_snapshot(__lookup_origin(bdev), s); up_write(&_origins_lock); - return 0; } static void unregister_snapshot(struct dm_snapshot *s) @@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s) o = __lookup_origin(s->origin->bdev); list_del(&s->list); - if (list_empty(&o->snapshots)) { + if (o && list_empty(&o->snapshots)) { list_del(&o->hash_list); kfree(o); } @@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s) * The lowest hash_shift bits of the chunk number are ignored, allowing * some consecutive chunks to be grouped together. 
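
As the comment above says, the exception-table hash ignores the lowest hash_shift bits of the chunk number (see exception_hash() just below), so runs of consecutive chunks land in the same bucket and can be folded into one exception with a consecutive-chunk count. A tiny standalone demonstration with made-up sizes:

#include <stdint.h>
#include <stdio.h>

static uint32_t exception_hash(uint64_t chunk, unsigned hash_shift,
                               uint32_t hash_mask)
{
        return (chunk >> hash_shift) & hash_mask;
}

int main(void)
{
        unsigned hash_shift = 4;        /* group 16 consecutive chunks */
        uint32_t hash_mask = 255;       /* 256 buckets                 */
        uint64_t chunk;

        /* chunks 32..47 all hash to the same bucket (2) */
        for (chunk = 32; chunk < 48; chunk++)
                printf("chunk %llu -> bucket %u\n",
                       (unsigned long long)chunk,
                       exception_hash(chunk, hash_shift, hash_mask));
        return 0;
}
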
*/ -static int init_exception_table(struct exception_table *et, uint32_t size, - unsigned hash_shift) +static int dm_exception_table_init(struct dm_exception_table *et, + uint32_t size, unsigned hash_shift) { unsigned int i; @@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size, return 0; } -static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) +static void dm_exception_table_exit(struct dm_exception_table *et, + struct kmem_cache *mem) { struct list_head *slot; - struct dm_snap_exception *ex, *next; + struct dm_exception *ex, *next; int i, size; size = et->hash_mask + 1; @@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache * vfree(et->table); } -static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) { return (chunk >> et->hash_shift) & et->hash_mask; } -static void insert_exception(struct exception_table *eh, - struct dm_snap_exception *e) -{ - struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; - list_add(&e->hash_list, l); -} - -static void remove_exception(struct dm_snap_exception *e) +static void dm_remove_exception(struct dm_exception *e) { list_del(&e->hash_list); } @@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e) * Return the exception data for a sector, or NULL if not * remapped. */ -static struct dm_snap_exception *lookup_exception(struct exception_table *et, - chunk_t chunk) +static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, + chunk_t chunk) { struct list_head *slot; - struct dm_snap_exception *e; + struct dm_exception *e; slot = &et->table[exception_hash(et, chunk)]; list_for_each_entry (e, slot, hash_list) @@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et, return NULL; } -static struct dm_snap_exception *alloc_exception(void) +static struct dm_exception *alloc_completed_exception(void) { - struct dm_snap_exception *e; + struct dm_exception *e; e = kmem_cache_alloc(exception_cache, GFP_NOIO); if (!e) @@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void) return e; } -static void free_exception(struct dm_snap_exception *e) +static void free_completed_exception(struct dm_exception *e) { kmem_cache_free(exception_cache, e); } @@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe) atomic_dec(&s->pending_exceptions_count); } -static void insert_completed_exception(struct dm_snapshot *s, - struct dm_snap_exception *new_e) +static void dm_insert_exception(struct dm_exception_table *eh, + struct dm_exception *new_e) { - struct exception_table *eh = &s->complete; struct list_head *l; - struct dm_snap_exception *e = NULL; + struct dm_exception *e = NULL; l = &eh->table[exception_hash(eh, new_e->old_chunk)]; @@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s, new_e->new_chunk == (dm_chunk_number(e->new_chunk) + dm_consecutive_chunk_count(e) + 1)) { dm_consecutive_chunk_count_inc(e); - free_exception(new_e); + free_completed_exception(new_e); return; } @@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s, dm_consecutive_chunk_count_inc(e); e->old_chunk--; e->new_chunk--; - free_exception(new_e); + free_completed_exception(new_e); return; } @@ -507,9 +684,9 @@ out: static int dm_add_exception(void *context, chunk_t old, chunk_t new) { 
struct dm_snapshot *s = context; - struct dm_snap_exception *e; + struct dm_exception *e; - e = alloc_exception(); + e = alloc_completed_exception(); if (!e) return -ENOMEM; @@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) /* Consecutive_count is implicitly initialised to zero */ e->new_chunk = new; - insert_completed_exception(s, e); + dm_insert_exception(&s->complete, e); return 0; } +#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r))) + +/* + * Return a minimum chunk size of all snapshots that have the specified origin. + * Return zero if the origin has no snapshots. + */ +static sector_t __minimum_chunk_size(struct origin *o) +{ + struct dm_snapshot *snap; + unsigned chunk_size = 0; + + if (o) + list_for_each_entry(snap, &o->snapshots, list) + chunk_size = min_not_zero(chunk_size, + snap->store->chunk_size); + + return chunk_size; +} + /* * Hard coded magic. */ @@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s) * Calculate based on the size of the original volume or * the COW volume... */ - cow_dev_size = get_dev_size(s->store->cow->bdev); + cow_dev_size = get_dev_size(s->cow->bdev); origin_dev_size = get_dev_size(s->origin->bdev); max_buckets = calc_max_buckets(); hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; hash_size = min(hash_size, max_buckets); + if (hash_size < 64) + hash_size = 64; hash_size = rounddown_pow_of_two(hash_size); - if (init_exception_table(&s->complete, hash_size, - DM_CHUNK_CONSECUTIVE_BITS)) + if (dm_exception_table_init(&s->complete, hash_size, + DM_CHUNK_CONSECUTIVE_BITS)) return -ENOMEM; /* @@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s) if (hash_size < 64) hash_size = 64; - if (init_exception_table(&s->pending, hash_size, 0)) { - exit_exception_table(&s->complete, exception_cache); + if (dm_exception_table_init(&s->pending, hash_size, 0)) { + dm_exception_table_exit(&s->complete, exception_cache); return -ENOMEM; } return 0; } +static void merge_shutdown(struct dm_snapshot *s) +{ + clear_bit_unlock(RUNNING_MERGE, &s->state_bits); + smp_mb__after_clear_bit(); + wake_up_bit(&s->state_bits, RUNNING_MERGE); +} + +static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) +{ + s->first_merging_chunk = 0; + s->num_merging_chunks = 0; + + return bio_list_get(&s->bios_queued_during_merge); +} + +/* + * Remove one chunk from the index of completed exceptions. + */ +static int __remove_single_exception_chunk(struct dm_snapshot *s, + chunk_t old_chunk) +{ + struct dm_exception *e; + + e = dm_lookup_exception(&s->complete, old_chunk); + if (!e) { + DMERR("Corruption detected: exception for block %llu is " + "on disk but not in memory", + (unsigned long long)old_chunk); + return -EINVAL; + } + + /* + * If this is the only chunk using this exception, remove exception. + */ + if (!dm_consecutive_chunk_count(e)) { + dm_remove_exception(e); + free_completed_exception(e); + return 0; + } + + /* + * The chunk may be either at the beginning or the end of a + * group of consecutive chunks - never in the middle. We are + * removing chunks in the opposite order to that in which they + * were added, so this should always be true. + * Decrement the consecutive chunk counter and adjust the + * starting point if necessary. 
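
__remove_single_exception_chunk() relies on the merge removing chunks only from the ends of a consecutive range: trim the front by bumping old_chunk and new_chunk, trim the back by just decrementing the count, and reject anything in the middle. The same bookkeeping on a plain struct; field and function names are illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A run of (count + 1) chunks: old_chunk..old_chunk+count maps to
 * new_chunk..new_chunk+count, mirroring dm_consecutive_chunk_count().
 */
struct exception_run {
        uint64_t old_chunk, new_chunk, count;
};

static int remove_chunk(struct exception_run *e, uint64_t old_chunk)
{
        if (!e->count)
                return 1;               /* only chunk left: caller drops the entry */

        if (old_chunk == e->old_chunk) {
                e->old_chunk++;         /* trim from the front */
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk + e->count) {
                return -EINVAL;         /* middle of the range: not allowed */
        }

        e->count--;                     /* one fewer chunk remains */
        return 0;
}

int main(void)
{
        struct exception_run e = { .old_chunk = 100, .new_chunk = 7, .count = 3 };

        remove_chunk(&e, 103);          /* trim the tail: count becomes 2   */
        remove_chunk(&e, 100);          /* trim the head: now 101..102 -> 8..9 */
        printf("%llu..%llu (count %llu)\n",
               (unsigned long long)e.old_chunk,
               (unsigned long long)(e.old_chunk + e.count),
               (unsigned long long)e.count);
        return 0;
}
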
+ */ + if (old_chunk == e->old_chunk) { + e->old_chunk++; + e->new_chunk++; + } else if (old_chunk != e->old_chunk + + dm_consecutive_chunk_count(e)) { + DMERR("Attempt to merge block %llu from the " + "middle of a chunk range [%llu - %llu]", + (unsigned long long)old_chunk, + (unsigned long long)e->old_chunk, + (unsigned long long) + e->old_chunk + dm_consecutive_chunk_count(e)); + return -EINVAL; + } + + dm_consecutive_chunk_count_dec(e); + + return 0; +} + +static void flush_bios(struct bio *bio); + +static int remove_single_exception_chunk(struct dm_snapshot *s) +{ + struct bio *b = NULL; + int r; + chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; + + down_write(&s->lock); + + /* + * Process chunks (and associated exceptions) in reverse order + * so that dm_consecutive_chunk_count_dec() accounting works. + */ + do { + r = __remove_single_exception_chunk(s, old_chunk); + if (r) + goto out; + } while (old_chunk-- > s->first_merging_chunk); + + b = __release_queued_bios_after_merge(s); + +out: + up_write(&s->lock); + if (b) + flush_bios(b); + + return r; +} + +static int origin_write_extent(struct dm_snapshot *merging_snap, + sector_t sector, unsigned chunk_size); + +static void merge_callback(int read_err, unsigned long write_err, + void *context); + +static uint64_t read_pending_exceptions_done_count(void) +{ + uint64_t pending_exceptions_done; + + spin_lock(&_pending_exceptions_done_spinlock); + pending_exceptions_done = _pending_exceptions_done_count; + spin_unlock(&_pending_exceptions_done_spinlock); + + return pending_exceptions_done; +} + +static void increment_pending_exceptions_done_count(void) +{ + spin_lock(&_pending_exceptions_done_spinlock); + _pending_exceptions_done_count++; + spin_unlock(&_pending_exceptions_done_spinlock); + + wake_up_all(&_pending_exceptions_done); +} + +static void snapshot_merge_next_chunks(struct dm_snapshot *s) +{ + int i, linear_chunks; + chunk_t old_chunk, new_chunk; + struct dm_io_region src, dest; + sector_t io_size; + uint64_t previous_count; + + BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); + if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) + goto shut; + + /* + * valid flag never changes during merge, so no lock required. + */ + if (!s->valid) { + DMERR("Snapshot is invalid: can't merge"); + goto shut; + } + + linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, + &new_chunk); + if (linear_chunks <= 0) { + if (linear_chunks < 0) { + DMERR("Read error in exception store: " + "shutting down merge"); + down_write(&s->lock); + s->merge_failed = 1; + up_write(&s->lock); + } + goto shut; + } + + /* Adjust old_chunk and new_chunk to reflect start of linear region */ + old_chunk = old_chunk + 1 - linear_chunks; + new_chunk = new_chunk + 1 - linear_chunks; + + /* + * Use one (potentially large) I/O to copy all 'linear_chunks' + * from the exception store to the origin + */ + io_size = linear_chunks * s->store->chunk_size; + + dest.bdev = s->origin->bdev; + dest.sector = chunk_to_sector(s->store, old_chunk); + dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); + + src.bdev = s->cow->bdev; + src.sector = chunk_to_sector(s->store, new_chunk); + src.count = dest.count; + + /* + * Reallocate any exceptions needed in other snapshots then + * wait for the pending exceptions to complete. + * Each time any pending exception (globally on the system) + * completes we are woken and repeat the process to find out + * if we can proceed. 
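
The comment above describes how snapshot_merge_next_chunks() retries origin_write_extent() each time the global pending-exceptions counter advances; the loop itself follows just below. A userspace sketch of the same retry pattern, in which the counter, lock and extent_busy() stub are stand-ins for the kernel machinery:

#include <pthread.h>
#include <stdint.h>

static uint64_t done_count;             /* like _pending_exceptions_done_count */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;

/* Called whenever any pending exception completes, system-wide. */
static void note_exception_done(void)
{
        pthread_mutex_lock(&lock);
        done_count++;
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
}

/* Stub: non-zero while pending exceptions still cover the extent. */
static int extent_busy(void)
{
        return 0;
}

static void wait_until_extent_idle(void)
{
        uint64_t seen;

        pthread_mutex_lock(&lock);
        seen = done_count;
        pthread_mutex_unlock(&lock);

        while (extent_busy()) {
                /* sleep until at least one more exception has completed */
                pthread_mutex_lock(&lock);
                while (done_count == seen)
                        pthread_cond_wait(&done, &lock);
                seen = done_count;
                pthread_mutex_unlock(&lock);
        }
}
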
While this may not seem a particularly + * efficient algorithm, it is not expected to have any + * significant impact on performance. + */ + previous_count = read_pending_exceptions_done_count(); + while (origin_write_extent(s, dest.sector, io_size)) { + wait_event(_pending_exceptions_done, + (read_pending_exceptions_done_count() != + previous_count)); + /* Retry after the wait, until all exceptions are done. */ + previous_count = read_pending_exceptions_done_count(); + } + + down_write(&s->lock); + s->first_merging_chunk = old_chunk; + s->num_merging_chunks = linear_chunks; + up_write(&s->lock); + + /* Wait until writes to all 'linear_chunks' drain */ + for (i = 0; i < linear_chunks; i++) + __check_for_conflicting_io(s, old_chunk + i); + + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); + return; + +shut: + merge_shutdown(s); +} + +static void error_bios(struct bio *bio); + +static void merge_callback(int read_err, unsigned long write_err, void *context) +{ + struct dm_snapshot *s = context; + struct bio *b = NULL; + + if (read_err || write_err) { + if (read_err) + DMERR("Read error: shutting down merge."); + else + DMERR("Write error: shutting down merge."); + goto shut; + } + + if (s->store->type->commit_merge(s->store, + s->num_merging_chunks) < 0) { + DMERR("Write error in exception store: shutting down merge"); + goto shut; + } + + if (remove_single_exception_chunk(s) < 0) + goto shut; + + snapshot_merge_next_chunks(s); + + return; + +shut: + down_write(&s->lock); + s->merge_failed = 1; + b = __release_queued_bios_after_merge(s); + up_write(&s->lock); + error_bios(b); + + merge_shutdown(s); +} + +static void start_merge(struct dm_snapshot *s) +{ + if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) + snapshot_merge_next_chunks(s); +} + +static int wait_schedule(void *ptr) +{ + schedule(); + + return 0; +} + +/* + * Stop the merging process and wait until it finishes. 
+ */ +static void stop_merge(struct dm_snapshot *s) +{ + set_bit(SHUTDOWN_MERGE, &s->state_bits); + wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule, + TASK_UNINTERRUPTIBLE); + clear_bit(SHUTDOWN_MERGE, &s->state_bits); +} + /* * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> */ @@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) struct dm_snapshot *s; int i; int r = -EINVAL; - char *origin_path; - struct dm_exception_store *store; - unsigned args_used; + char *origin_path, *cow_path; + unsigned args_used, num_flush_requests = 1; + fmode_t origin_mode = FMODE_READ; if (argc != 4) { ti->error = "requires exactly 4 arguments"; r = -EINVAL; - goto bad_args; + goto bad; + } + + if (dm_target_is_snapshot_merge(ti)) { + num_flush_requests = 2; + origin_mode = FMODE_WRITE; } origin_path = argv[0]; argv++; argc--; - r = dm_exception_store_create(ti, argc, argv, &args_used, &store); + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + ti->error = "Cannot allocate snapshot context private " + "structure"; + r = -ENOMEM; + goto bad; + } + + cow_path = argv[0]; + argv++; + argc--; + + r = dm_get_device(ti, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + ti->error = "Cannot get COW device"; + goto bad_cow; + } + + r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); if (r) { ti->error = "Couldn't create exception store"; r = -EINVAL; - goto bad_args; + goto bad_store; } argv += args_used; argc -= args_used; - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) { - ti->error = "Cannot allocate snapshot context private " - "structure"; - r = -ENOMEM; - goto bad_snap; - } - - r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); + r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin); if (r) { ti->error = "Cannot get origin device"; goto bad_origin; } - s->store = store; + s->ti = ti; s->valid = 1; s->active = 0; + s->suspended = 0; atomic_set(&s->pending_exceptions_count, 0); init_rwsem(&s->lock); + INIT_LIST_HEAD(&s->list); spin_lock_init(&s->pe_lock); + s->state_bits = 0; + s->merge_failed = 0; + s->first_merging_chunk = 0; + s->num_merging_chunks = 0; + bio_list_init(&s->bios_queued_during_merge); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) spin_lock_init(&s->tracked_chunk_lock); - /* Metadata must only be loaded into one table at once */ + bio_list_init(&s->queued_bios); + INIT_WORK(&s->queued_bios_work, flush_queued_bios); + + ti->private = s; + ti->num_flush_requests = num_flush_requests; + + /* Add snapshot to the list of snapshots for this origin */ + /* Exceptions aren't triggered till snapshot_resume() is called */ + r = register_snapshot(s); + if (r == -ENOMEM) { + ti->error = "Snapshot origin struct allocation failed"; + goto bad_load_and_register; + } else if (r < 0) { + /* invalid handover, register_snapshot has set ti->error */ + goto bad_load_and_register; + } + + /* + * Metadata must only be loaded into one table at once, so skip this + * if metadata will be handed over during resume. + * Chunk size will be set during the handover - set it to zero to + * ensure it's ignored. 
+ */ + if (r > 0) { + s->store->chunk_size = 0; + return 0; + } + r = s->store->type->read_metadata(s->store, dm_add_exception, (void *)s); if (r < 0) { ti->error = "Failed to read snapshot metadata"; - goto bad_load_and_register; + goto bad_read_metadata; } else if (r > 0) { s->valid = 0; DMWARN("Snapshot is marked invalid."); } - bio_list_init(&s->queued_bios); - INIT_WORK(&s->queued_bios_work, flush_queued_bios); - if (!s->store->chunk_size) { ti->error = "Chunk size not set"; - goto bad_load_and_register; - } - - /* Add snapshot to the list of snapshots for this origin */ - /* Exceptions aren't triggered till snapshot_resume() is called */ - if (register_snapshot(s)) { - r = -EINVAL; - ti->error = "Cannot register snapshot origin"; - goto bad_load_and_register; + goto bad_read_metadata; } - - ti->private = s; ti->split_io = s->store->chunk_size; - ti->num_flush_requests = 1; return 0; +bad_read_metadata: + unregister_snapshot(s); + bad_load_and_register: mempool_destroy(s->tracked_chunk_pool); @@ -702,19 +1209,22 @@ bad_pending_pool: dm_kcopyd_client_destroy(s->kcopyd_client); bad_kcopyd: - exit_exception_table(&s->pending, pending_cache); - exit_exception_table(&s->complete, exception_cache); + dm_exception_table_exit(&s->pending, pending_cache); + dm_exception_table_exit(&s->complete, exception_cache); bad_hash_tables: dm_put_device(ti, s->origin); bad_origin: - kfree(s); + dm_exception_store_destroy(s->store); -bad_snap: - dm_exception_store_destroy(store); +bad_store: + dm_put_device(ti, s->cow); + +bad_cow: + kfree(s); -bad_args: +bad: return r; } @@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s) dm_kcopyd_client_destroy(s->kcopyd_client); s->kcopyd_client = NULL; - exit_exception_table(&s->pending, pending_cache); - exit_exception_table(&s->complete, exception_cache); + dm_exception_table_exit(&s->pending, pending_cache); + dm_exception_table_exit(&s->complete, exception_cache); +} + +static void __handover_exceptions(struct dm_snapshot *snap_src, + struct dm_snapshot *snap_dest) +{ + union { + struct dm_exception_table table_swap; + struct dm_exception_store *store_swap; + } u; + + /* + * Swap all snapshot context information between the two instances. + */ + u.table_swap = snap_dest->complete; + snap_dest->complete = snap_src->complete; + snap_src->complete = u.table_swap; + + u.store_swap = snap_dest->store; + snap_dest->store = snap_src->store; + snap_src->store = u.store_swap; + + snap_dest->store->snap = snap_dest; + snap_src->store->snap = snap_src; + + snap_dest->ti->split_io = snap_dest->store->chunk_size; + snap_dest->valid = snap_src->valid; + + /* + * Set source invalid to ensure it receives no further I/O. + */ + snap_src->valid = 0; } static void snapshot_dtr(struct dm_target *ti) @@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti) int i; #endif struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; flush_workqueue(ksnapd); + down_read(&_origins_lock); + /* Check whether exception handover must be cancelled */ + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest && (s == snap_src)) { + down_write(&snap_dest->lock); + snap_dest->valid = 0; + up_write(&snap_dest->lock); + DMERR("Cancelling snapshot handover."); + } + up_read(&_origins_lock); + + if (dm_target_is_snapshot_merge(ti)) + stop_merge(s); + /* Prevent further origin writes from using this snapshot. */ /* After this returns there can be no new kcopyd jobs. 
*/ unregister_snapshot(s); @@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); + dm_put_device(ti, s->cow); + kfree(s); } @@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work) flush_bios(queued_bios); } +static int do_origin(struct dm_dev *origin, struct bio *bio); + +/* + * Flush a list of buffers. + */ +static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) +{ + struct bio *n; + int r; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + r = do_origin(s->origin, bio); + if (r == DM_MAPIO_REMAPPED) + generic_make_request(bio); + bio = n; + } +} + /* * Error a list of buffers. */ @@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) s->valid = 0; - dm_table_event(s->store->ti->table); -} - -static void get_pending_exception(struct dm_snap_pending_exception *pe) -{ - atomic_inc(&pe->ref_count); -} - -static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) -{ - struct dm_snap_pending_exception *primary_pe; - struct bio *origin_bios = NULL; - - primary_pe = pe->primary_pe; - - /* - * If this pe is involved in a write to the origin and - * it is the last sibling to complete then release - * the bios for the original write to the origin. - */ - if (primary_pe && - atomic_dec_and_test(&primary_pe->ref_count)) { - origin_bios = bio_list_get(&primary_pe->origin_bios); - free_pending_exception(primary_pe); - } - - /* - * Free the pe if it's not linked to an origin write or if - * it's not itself a primary pe. - */ - if (!primary_pe || primary_pe != pe) - free_pending_exception(pe); - - return origin_bios; + dm_table_event(s->ti->table); } static void pending_complete(struct dm_snap_pending_exception *pe, int success) { - struct dm_snap_exception *e; + struct dm_exception *e; struct dm_snapshot *s = pe->snap; struct bio *origin_bios = NULL; struct bio *snapshot_bios = NULL; @@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) goto out; } - e = alloc_exception(); + e = alloc_completed_exception(); if (!e) { down_write(&s->lock); __invalidate_snapshot(s, -ENOMEM); @@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) down_write(&s->lock); if (!s->valid) { - free_exception(e); + free_completed_exception(e); error = 1; goto out; } - /* - * Check for conflicting reads. This is extremely improbable, - * so msleep(1) is sufficient and there is no need for a wait queue. - */ - while (__chunk_is_tracked(s, pe->e.old_chunk)) - msleep(1); + /* Check for conflicting reads */ + __check_for_conflicting_io(s, pe->e.old_chunk); /* * Add a proper exception, and remove the * in-flight exception from the list. 
*/ - insert_completed_exception(s, e); + dm_insert_exception(&s->complete, e); out: - remove_exception(&pe->e); + dm_remove_exception(&pe->e); snapshot_bios = bio_list_get(&pe->snapshot_bios); - origin_bios = put_pending_exception(pe); + origin_bios = bio_list_get(&pe->origin_bios); + free_pending_exception(pe); + + increment_pending_exceptions_done_count(); up_write(&s->lock); @@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) else flush_bios(snapshot_bios); - flush_bios(origin_bios); + retry_origin_bios(s, origin_bios); } static void commit_callback(void *context, int success) @@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) src.sector = chunk_to_sector(s->store, pe->e.old_chunk); src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); - dest.bdev = s->store->cow->bdev; + dest.bdev = s->cow->bdev; dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); dest.count = src.count; @@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) static struct dm_snap_pending_exception * __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) { - struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); + struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); if (!e) return NULL; @@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s, pe->e.old_chunk = chunk; bio_list_init(&pe->origin_bios); bio_list_init(&pe->snapshot_bios); - pe->primary_pe = NULL; - atomic_set(&pe->ref_count, 0); pe->started = 0; if (s->store->type->prepare_exception(s->store, &pe->e)) { @@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s, return NULL; } - get_pending_exception(pe); - insert_exception(&s->pending, &pe->e); + dm_insert_exception(&s->pending, &pe->e); return pe; } -static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, +static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { - bio->bi_bdev = s->store->cow->bdev; + bio->bi_bdev = s->cow->bdev; bio->bi_sector = chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + (chunk - e->old_chunk)) + @@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, static int snapshot_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { - struct dm_snap_exception *e; + struct dm_exception *e; struct dm_snapshot *s = ti->private; int r = DM_MAPIO_REMAPPED; chunk_t chunk; struct dm_snap_pending_exception *pe = NULL; if (unlikely(bio_empty_barrier(bio))) { - bio->bi_bdev = s->store->cow->bdev; + bio->bi_bdev = s->cow->bdev; return DM_MAPIO_REMAPPED; } @@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, } /* If the block is already remapped - use that, else remap it */ - e = lookup_exception(&s->complete, chunk); + e = dm_lookup_exception(&s->complete, chunk); if (e) { remap_exception(s, e, bio, chunk); goto out_unlock; @@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, goto out_unlock; } - e = lookup_exception(&s->complete, chunk); + e = dm_lookup_exception(&s->complete, chunk); if (e) { free_pending_exception(pe); remap_exception(s, e, bio, chunk); @@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, return r; } +/* + * A snapshot-merge target behaves like a combination of a snapshot + * target and a snapshot-origin target. 
It only generates new + * exceptions in other snapshots and not in the one that is being + * merged. + * + * For each chunk, if there is an existing exception, it is used to + * redirect I/O to the cow device. Otherwise I/O is sent to the origin, + * which in turn might generate exceptions in other snapshots. + * If merging is currently taking place on the chunk in question, the + * I/O is deferred by adding it to s->bios_queued_during_merge. + */ +static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_exception *e; + struct dm_snapshot *s = ti->private; + int r = DM_MAPIO_REMAPPED; + chunk_t chunk; + + if (unlikely(bio_empty_barrier(bio))) { + if (!map_context->flush_request) + bio->bi_bdev = s->origin->bdev; + else + bio->bi_bdev = s->cow->bdev; + map_context->ptr = NULL; + return DM_MAPIO_REMAPPED; + } + + chunk = sector_to_chunk(s->store, bio->bi_sector); + + down_write(&s->lock); + + /* Full merging snapshots are redirected to the origin */ + if (!s->valid) + goto redirect_to_origin; + + /* If the block is already remapped - use that */ + e = dm_lookup_exception(&s->complete, chunk); + if (e) { + /* Queue writes overlapping with chunks being merged */ + if (bio_rw(bio) == WRITE && + chunk >= s->first_merging_chunk && + chunk < (s->first_merging_chunk + + s->num_merging_chunks)) { + bio->bi_bdev = s->origin->bdev; + bio_list_add(&s->bios_queued_during_merge, bio); + r = DM_MAPIO_SUBMITTED; + goto out_unlock; + } + + remap_exception(s, e, bio, chunk); + + if (bio_rw(bio) == WRITE) + map_context->ptr = track_chunk(s, chunk); + goto out_unlock; + } + +redirect_to_origin: + bio->bi_bdev = s->origin->bdev; + + if (bio_rw(bio) == WRITE) { + up_write(&s->lock); + return do_origin(s->origin, bio); + } + +out_unlock: + up_write(&s->lock); + + return r; +} + static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error, union map_info *map_context) { @@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio, return 0; } +static void snapshot_merge_presuspend(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + stop_merge(s); +} + +static void snapshot_postsuspend(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + down_write(&s->lock); + s->suspended = 1; + up_write(&s->lock); +} + +static int snapshot_preresume(struct dm_target *ti) +{ + int r = 0; + struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + + down_read(&_origins_lock); + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest) { + down_read(&snap_src->lock); + if (s == snap_src) { + DMERR("Unable to resume snapshot source until " + "handover completes."); + r = -EINVAL; + } else if (!snap_src->suspended) { + DMERR("Unable to perform snapshot handover until " + "source is suspended."); + r = -EINVAL; + } + up_read(&snap_src->lock); + } + up_read(&_origins_lock); + + return r; +} + static void snapshot_resume(struct dm_target *ti) { struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + + down_read(&_origins_lock); + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest) { + down_write(&snap_src->lock); + down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); + __handover_exceptions(snap_src, snap_dest); + up_write(&snap_dest->lock); + up_write(&snap_src->lock); + } + up_read(&_origins_lock); + + /* Now we have correct chunk 
size, reregister */ + reregister_snapshot(s); down_write(&s->lock); s->active = 1; + s->suspended = 0; up_write(&s->lock); } +static sector_t get_origin_minimum_chunksize(struct block_device *bdev) +{ + sector_t min_chunksize; + + down_read(&_origins_lock); + min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); + up_read(&_origins_lock); + + return min_chunksize; +} + +static void snapshot_merge_resume(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + /* + * Handover exceptions from existing snapshot. + */ + snapshot_resume(ti); + + /* + * snapshot-merge acts as an origin, so set ti->split_io + */ + ti->split_io = get_origin_minimum_chunksize(s->origin->bdev); + + start_merge(s); +} + static int snapshot_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { unsigned sz = 0; struct dm_snapshot *snap = ti->private; - down_write(&snap->lock); - switch (type) { case STATUSTYPE_INFO: + + down_write(&snap->lock); + if (!snap->valid) DMEMIT("Invalid"); + else if (snap->merge_failed) + DMEMIT("Merge failed"); else { - if (snap->store->type->fraction_full) { - sector_t numerator, denominator; - snap->store->type->fraction_full(snap->store, - &numerator, - &denominator); - DMEMIT("%llu/%llu", - (unsigned long long)numerator, - (unsigned long long)denominator); + if (snap->store->type->usage) { + sector_t total_sectors, sectors_allocated, + metadata_sectors; + snap->store->type->usage(snap->store, + &total_sectors, + §ors_allocated, + &metadata_sectors); + DMEMIT("%llu/%llu %llu", + (unsigned long long)sectors_allocated, + (unsigned long long)total_sectors, + (unsigned long long)metadata_sectors); } else DMEMIT("Unknown"); } + + up_write(&snap->lock); + break; case STATUSTYPE_TABLE: @@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, * to make private copies if the output is to * make sense. */ - DMEMIT("%s", snap->origin->name); + DMEMIT("%s %s", snap->origin->name, snap->cow->name); snap->store->type->status(snap->store, type, result + sz, maxlen - sz); break; } - up_write(&snap->lock); - return 0; } @@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti, /*----------------------------------------------------------------- * Origin methods *---------------------------------------------------------------*/ -static int __origin_write(struct list_head *snapshots, struct bio *bio) + +/* + * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any + * supplied bio was ignored. The caller may submit it immediately. + * (No remapping actually occurs as the origin is always a direct linear + * map.) + * + * If further exceptions are required, DM_MAPIO_SUBMITTED is returned + * and any supplied bio is added to a list to be submitted once all + * the necessary exceptions exist. 
+ */ +static int __origin_write(struct list_head *snapshots, sector_t sector, + struct bio *bio) { - int r = DM_MAPIO_REMAPPED, first = 0; + int r = DM_MAPIO_REMAPPED; struct dm_snapshot *snap; - struct dm_snap_exception *e; - struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; + struct dm_exception *e; + struct dm_snap_pending_exception *pe; + struct dm_snap_pending_exception *pe_to_start_now = NULL; + struct dm_snap_pending_exception *pe_to_start_last = NULL; chunk_t chunk; - LIST_HEAD(pe_queue); /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { + /* + * Don't make new exceptions in a merging snapshot + * because it has effectively been deleted + */ + if (dm_target_is_snapshot_merge(snap->ti)) + continue; down_write(&snap->lock); @@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) goto next_snapshot; /* Nothing to do if writing beyond end of snapshot */ - if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table)) + if (sector >= dm_table_get_size(snap->ti->table)) goto next_snapshot; /* * Remember, different snapshots can have * different chunk sizes. */ - chunk = sector_to_chunk(snap->store, bio->bi_sector); + chunk = sector_to_chunk(snap->store, sector); /* * Check exception table to see if block * is already remapped in this snapshot * and trigger an exception if not. - * - * ref_count is initialised to 1 so pending_complete() - * won't destroy the primary_pe while we're inside this loop. */ - e = lookup_exception(&snap->complete, chunk); + e = dm_lookup_exception(&snap->complete, chunk); if (e) goto next_snapshot; @@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) goto next_snapshot; } - e = lookup_exception(&snap->complete, chunk); + e = dm_lookup_exception(&snap->complete, chunk); if (e) { free_pending_exception(pe); goto next_snapshot; @@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) } } - if (!primary_pe) { - /* - * Either every pe here has same - * primary_pe or none has one yet. - */ - if (pe->primary_pe) - primary_pe = pe->primary_pe; - else { - primary_pe = pe; - first = 1; - } - - bio_list_add(&primary_pe->origin_bios, bio); + r = DM_MAPIO_SUBMITTED; - r = DM_MAPIO_SUBMITTED; - } + /* + * If an origin bio was supplied, queue it to wait for the + * completion of this exception, and start this one last, + * at the end of the function. + */ + if (bio) { + bio_list_add(&pe->origin_bios, bio); + bio = NULL; - if (!pe->primary_pe) { - pe->primary_pe = primary_pe; - get_pending_exception(primary_pe); + if (!pe->started) { + pe->started = 1; + pe_to_start_last = pe; + } } if (!pe->started) { pe->started = 1; - list_add_tail(&pe->list, &pe_queue); + pe_to_start_now = pe; } next_snapshot: up_write(&snap->lock); - } - if (!primary_pe) - return r; - - /* - * If this is the first time we're processing this chunk and - * ref_count is now 1 it means all the pending exceptions - * got completed while we were in the loop above, so it falls to - * us here to remove the primary_pe and submit any origin_bios. - */ - - if (first && atomic_dec_and_test(&primary_pe->ref_count)) { - flush_bios(bio_list_get(&primary_pe->origin_bios)); - free_pending_exception(primary_pe); - /* If we got here, pe_queue is necessarily empty. */ - return r; + if (pe_to_start_now) { + start_copy(pe_to_start_now); + pe_to_start_now = NULL; + } } /* - * Now that we have a complete pe list we can start the copying. 
+ * Submit the exception against which the bio is queued last, + * to give the other exceptions a head start. */ - list_for_each_entry_safe(pe, next_pe, &pe_queue, list) - start_copy(pe); + if (pe_to_start_last) + start_copy(pe_to_start_last); return r; } @@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) - r = __origin_write(&o->snapshots, bio); + r = __origin_write(&o->snapshots, bio->bi_sector, bio); up_read(&_origins_lock); return r; } /* + * Trigger exceptions in all non-merging snapshots. + * + * The chunk size of the merging snapshot may be larger than the chunk + * size of some other snapshot so we may need to reallocate multiple + * chunks in other snapshots. + * + * We scan all the overlapping exceptions in the other snapshots. + * Returns 1 if anything was reallocated and must be waited for, + * otherwise returns 0. + * + * size must be a multiple of merging_snap's chunk_size. + */ +static int origin_write_extent(struct dm_snapshot *merging_snap, + sector_t sector, unsigned size) +{ + int must_wait = 0; + sector_t n; + struct origin *o; + + /* + * The origin's __minimum_chunk_size() got stored in split_io + * by snapshot_merge_resume(). + */ + down_read(&_origins_lock); + o = __lookup_origin(merging_snap->origin->bdev); + for (n = 0; n < size; n += merging_snap->ti->split_io) + if (__origin_write(&o->snapshots, sector + n, NULL) == + DM_MAPIO_SUBMITTED) + must_wait = 1; + up_read(&_origins_lock); + + return must_wait; +} + +/* * Origin: maps a linear range of a device, with hooks for snapshotting. */ @@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio, return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; } -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) - /* * Set the target "split_io" field to the minimum of all the snapshots' * chunk sizes. 
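The split_io value has to be the smallest chunk size of any snapshot on the origin, and an unset (zero) chunk size must not win the comparison; __minimum_chunk_size() expresses this with min_not_zero(). A minimal user-space sketch of that selection rule, with hypothetical chunk sizes (everything below is illustrative, not the kernel code itself):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
	/* Hypothetical chunk sizes (in sectors) of snapshots sharing one
	 * origin; 0 stands for a store whose chunk size is not yet set. */
	unsigned chunk_sizes[] = { 0, 16, 8, 32 };
	unsigned i, chunk_size = 0;

	for (i = 0; i < sizeof(chunk_sizes) / sizeof(chunk_sizes[0]); i++)
		chunk_size = min_not_zero(chunk_size, chunk_sizes[i]);

	/* Prints 8: zeroes are ignored, the smallest real chunk size wins. */
	printf("split_io = %u\n", chunk_size);
	return 0;
}

With the selection factored out this way, both origin_resume() and snapshot_merge_resume() can share get_origin_minimum_chunksize() instead of open-coding the loop.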
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio, static void origin_resume(struct dm_target *ti) { struct dm_dev *dev = ti->private; - struct dm_snapshot *snap; - struct origin *o; - unsigned chunk_size = 0; - - down_read(&_origins_lock); - o = __lookup_origin(dev->bdev); - if (o) - list_for_each_entry (snap, &o->snapshots, list) - chunk_size = min_not_zero(chunk_size, - snap->store->chunk_size); - up_read(&_origins_lock); - ti->split_io = chunk_size; + ti->split_io = get_origin_minimum_chunksize(dev->bdev); } static int origin_status(struct dm_target *ti, status_type_t type, char *result, @@ -1455,17 +2183,35 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 7, 0}, + .version = {1, 9, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_map, .end_io = snapshot_end_io, + .postsuspend = snapshot_postsuspend, + .preresume = snapshot_preresume, .resume = snapshot_resume, .status = snapshot_status, .iterate_devices = snapshot_iterate_devices, }; +static struct target_type merge_target = { + .name = dm_snapshot_merge_target_name, + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = snapshot_ctr, + .dtr = snapshot_dtr, + .map = snapshot_merge_map, + .end_io = snapshot_end_io, + .presuspend = snapshot_merge_presuspend, + .postsuspend = snapshot_postsuspend, + .preresume = snapshot_preresume, + .resume = snapshot_merge_resume, + .status = snapshot_status, + .iterate_devices = snapshot_iterate_devices, +}; + static int __init dm_snapshot_init(void) { int r; @@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void) } r = dm_register_target(&snapshot_target); - if (r) { + if (r < 0) { DMERR("snapshot target register failed %d", r); goto bad_register_snapshot_target; } @@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void) r = dm_register_target(&origin_target); if (r < 0) { DMERR("Origin target register failed %d", r); - goto bad1; + goto bad_register_origin_target; + } + + r = dm_register_target(&merge_target); + if (r < 0) { + DMERR("Merge target register failed %d", r); + goto bad_register_merge_target; } r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); - goto bad2; + goto bad_origin_hash; } - exception_cache = KMEM_CACHE(dm_snap_exception, 0); + exception_cache = KMEM_CACHE(dm_exception, 0); if (!exception_cache) { DMERR("Couldn't create exception cache."); r = -ENOMEM; - goto bad3; + goto bad_exception_cache; } pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); if (!pending_cache) { DMERR("Couldn't create pending cache."); r = -ENOMEM; - goto bad4; + goto bad_pending_cache; } tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); if (!tracked_chunk_cache) { DMERR("Couldn't create cache to track chunks in use."); r = -ENOMEM; - goto bad5; + goto bad_tracked_chunk_cache; } ksnapd = create_singlethread_workqueue("ksnapd"); @@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void) bad_pending_pool: kmem_cache_destroy(tracked_chunk_cache); -bad5: +bad_tracked_chunk_cache: kmem_cache_destroy(pending_cache); -bad4: +bad_pending_cache: kmem_cache_destroy(exception_cache); -bad3: +bad_exception_cache: exit_origin_hash(); -bad2: +bad_origin_hash: + dm_unregister_target(&merge_target); +bad_register_merge_target: dm_unregister_target(&origin_target); -bad1: +bad_register_origin_target: dm_unregister_target(&snapshot_target); - bad_register_snapshot_target: dm_exception_store_exit(); + return r; } @@ 
-1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void) dm_unregister_target(&snapshot_target); dm_unregister_target(&origin_target); + dm_unregister_target(&merge_target); exit_origin_hash(); kmem_cache_destroy(pending_cache); diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index 4b045903a4e..f53392df7b9 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) { - sprintf(buf, "%d\n", dm_suspended(md)); + sprintf(buf, "%d\n", dm_suspended_md(md)); return strlen(buf); } @@ -80,12 +80,20 @@ static struct sysfs_ops dm_sysfs_ops = { }; /* + * The sysfs structure is embedded in md struct, nothing to do here + */ +static void dm_sysfs_release(struct kobject *kobj) +{ +} + +/* * dm kobject is embedded in mapped_device structure * no need to define release function here */ static struct kobj_type dm_ktype = { .sysfs_ops = &dm_sysfs_ops, .default_attrs = dm_attrs, + .release = dm_sysfs_release }; /* diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 1a6cb3c7822..be625475cf6 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -12,6 +12,7 @@ #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> @@ -237,6 +238,9 @@ void dm_table_destroy(struct dm_table *t) { unsigned int i; + if (!t) + return; + while (atomic_read(&t->holders)) msleep(1); smp_mb(); @@ -600,11 +604,8 @@ int dm_split_args(int *argc, char ***argvp, char *input) return -ENOMEM; while (1) { - start = end; - /* Skip whitespace */ - while (*start && isspace(*start)) - start++; + start = skip_spaces(end); if (!*start) break; /* success, we hit the end */ diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c index 6f65883aef1..c7c555a8c7b 100644 --- a/drivers/md/dm-uevent.c +++ b/drivers/md/dm-uevent.c @@ -139,14 +139,13 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj) list_del_init(&event->elist); /* - * Need to call dm_copy_name_and_uuid from here for now. - * Context of previous var adds and locking used for - * hash_cell not compatable. + * When a device is being removed this copy fails and we + * discard these unsent events. */ if (dm_copy_name_and_uuid(event->md, event->name, event->uuid)) { - DMERR("%s: dm_copy_name_and_uuid() failed", - __func__); + DMINFO("%s: skipping sending uevent for lost device", + __func__); goto uevent_free; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 724efc63904..3167480b532 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -143,9 +143,19 @@ struct mapped_device { int barrier_error; /* + * Protect barrier_error from concurrent endio processing + * in request-based dm. + */ + spinlock_t barrier_error_lock; + + /* * Processing queue (flush/barriers) */ struct workqueue_struct *wq; + struct work_struct barrier_work; + + /* A pointer to the currently processing pre/post flush request */ + struct request *flush_request; /* * The current mapping. 
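dm_split_args() now leans on the generic skip_spaces() helper rather than an open-coded isspace() loop when looking for the start of the next argument. A rough user-space equivalent of that whitespace-skipping step (the helper below is a stand-in for the kernel's skip_spaces(), and the sample input is invented):

#include <ctype.h>
#include <stdio.h>

/* User-space stand-in for the kernel's skip_spaces() string helper. */
static char *skip_spaces(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	char input[] = "   snapshot-origin   /dev/mapper/example";
	char *start = skip_spaces(input);

	/* Prints the argument string with its leading whitespace dropped. */
	printf("'%s'\n", start);
	return 0;
}

The behaviour is unchanged; the point is simply to reuse the shared string helper instead of duplicating it.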
@@ -178,9 +188,6 @@ struct mapped_device { /* forced geometry settings */ struct hd_geometry geometry; - /* marker of flush suspend for request-based dm */ - struct request suspend_rq; - /* For saving the address of __make_request for request based dm */ make_request_fn *saved_make_request_fn; @@ -275,6 +282,7 @@ static int (*_inits[])(void) __initdata = { dm_target_init, dm_linear_init, dm_stripe_init, + dm_io_init, dm_kcopyd_init, dm_interface_init, }; @@ -284,6 +292,7 @@ static void (*_exits[])(void) = { dm_target_exit, dm_linear_exit, dm_stripe_exit, + dm_io_exit, dm_kcopyd_exit, dm_interface_exit, }; @@ -320,6 +329,11 @@ static void __exit dm_exit(void) /* * Block device functions */ +int dm_deleting_md(struct mapped_device *md) +{ + return test_bit(DMF_DELETING, &md->flags); +} + static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; @@ -331,7 +345,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) goto out; if (test_bit(DMF_FREEING, &md->flags) || - test_bit(DMF_DELETING, &md->flags)) { + dm_deleting_md(md)) { md = NULL; goto out; } @@ -388,7 +402,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *tgt; int r = -ENOTTY; @@ -401,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, tgt = dm_table_get_target(map, 0); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EAGAIN; goto out; } @@ -430,9 +444,10 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio) mempool_free(tio, md->tio_pool); } -static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) +static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, + gfp_t gfp_mask) { - return mempool_alloc(md->tio_pool, GFP_ATOMIC); + return mempool_alloc(md->tio_pool, gfp_mask); } static void free_rq_tio(struct dm_rq_target_io *tio) @@ -450,6 +465,12 @@ static void free_bio_info(struct dm_rq_clone_bio_info *info) mempool_free(info, info->tio->md->io_pool); } +static int md_in_flight(struct mapped_device *md) +{ + return atomic_read(&md->pending[READ]) + + atomic_read(&md->pending[WRITE]); +} + static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; @@ -512,7 +533,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio) * function to access the md->map field, and make sure they call * dm_table_put() when finished. */ -struct dm_table *dm_get_table(struct mapped_device *md) +struct dm_table *dm_get_live_table(struct mapped_device *md) { struct dm_table *t; unsigned long flags; @@ -716,28 +737,38 @@ static void end_clone_bio(struct bio *clone, int error) blk_update_request(tio->orig, 0, nr_bytes); } +static void store_barrier_error(struct mapped_device *md, int error) +{ + unsigned long flags; + + spin_lock_irqsave(&md->barrier_error_lock, flags); + /* + * Basically, the first error is taken, but: + * -EOPNOTSUPP supersedes any I/O error. + * Requeue request supersedes any I/O error but -EOPNOTSUPP. + */ + if (!md->barrier_error || error == -EOPNOTSUPP || + (md->barrier_error != -EOPNOTSUPP && + error == DM_ENDIO_REQUEUE)) + md->barrier_error = error; + spin_unlock_irqrestore(&md->barrier_error_lock, flags); +} + /* * Don't touch any member of the md after calling this function because * the md may be freed in dm_put() at the end of this function. 
* Or do dm_get() before calling this function and dm_put() later. */ -static void rq_completed(struct mapped_device *md, int run_queue) +static void rq_completed(struct mapped_device *md, int rw, int run_queue) { - int wakeup_waiters = 0; - struct request_queue *q = md->queue; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - if (!queue_in_flight(q)) - wakeup_waiters = 1; - spin_unlock_irqrestore(q->queue_lock, flags); + atomic_dec(&md->pending[rw]); /* nudge anyone waiting on suspend queue */ - if (wakeup_waiters) + if (!md_in_flight(md)) wake_up(&md->wait); if (run_queue) - blk_run_queue(q); + blk_run_queue(md->queue); /* * dm_put() must be at the end of this function. See the comment above @@ -753,6 +784,44 @@ static void free_rq_clone(struct request *clone) free_rq_tio(tio); } +/* + * Complete the clone and the original request. + * Must be called without queue lock. + */ +static void dm_end_request(struct request *clone, int error) +{ + int rw = rq_data_dir(clone); + int run_queue = 1; + bool is_barrier = blk_barrier_rq(clone); + struct dm_rq_target_io *tio = clone->end_io_data; + struct mapped_device *md = tio->md; + struct request *rq = tio->orig; + + if (blk_pc_request(rq) && !is_barrier) { + rq->errors = clone->errors; + rq->resid_len = clone->resid_len; + + if (rq->sense) + /* + * We are using the sense buffer of the original + * request. + * So setting the length of the sense data is enough. + */ + rq->sense_len = clone->sense_len; + } + + free_rq_clone(clone); + + if (unlikely(is_barrier)) { + if (unlikely(error)) + store_barrier_error(md, error); + run_queue = 0; + } else + blk_end_request_all(rq, error); + + rq_completed(md, rw, run_queue); +} + static void dm_unprep_request(struct request *rq) { struct request *clone = rq->special; @@ -768,12 +837,23 @@ static void dm_unprep_request(struct request *rq) */ void dm_requeue_unmapped_request(struct request *clone) { + int rw = rq_data_dir(clone); struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig; struct request_queue *q = rq->q; unsigned long flags; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. + * Leave it to dm_end_request(), which handles this special + * case. + */ + dm_end_request(clone, DM_ENDIO_REQUEUE); + return; + } + dm_unprep_request(rq); spin_lock_irqsave(q->queue_lock, flags); @@ -782,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone) blk_requeue_request(q, rq); spin_unlock_irqrestore(q->queue_lock, flags); - rq_completed(md, 0); + rq_completed(md, rw, 0); } EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); @@ -815,34 +895,28 @@ static void start_queue(struct request_queue *q) spin_unlock_irqrestore(q->queue_lock, flags); } -/* - * Complete the clone and the original request. - * Must be called without queue lock. - */ -static void dm_end_request(struct request *clone, int error) +static void dm_done(struct request *clone, int error, bool mapped) { + int r = error; struct dm_rq_target_io *tio = clone->end_io_data; - struct mapped_device *md = tio->md; - struct request *rq = tio->orig; + dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; - if (blk_pc_request(rq)) { - rq->errors = clone->errors; - rq->resid_len = clone->resid_len; + if (mapped && rq_end_io) + r = rq_end_io(tio->ti, clone, error, &tio->info); - if (rq->sense) - /* - * We are using the sense buffer of the original - * request. - * So setting the length of the sense data is enough. 
- */ - rq->sense_len = clone->sense_len; + if (r <= 0) + /* The target wants to complete the I/O */ + dm_end_request(clone, r); + else if (r == DM_ENDIO_INCOMPLETE) + /* The target will handle the I/O */ + return; + else if (r == DM_ENDIO_REQUEUE) + /* The target wants to requeue the I/O */ + dm_requeue_unmapped_request(clone); + else { + DMWARN("unimplemented target endio return value: %d", r); + BUG(); } - - free_rq_clone(clone); - - blk_end_request_all(rq, error); - - rq_completed(md, 1); } /* @@ -850,27 +924,14 @@ static void dm_end_request(struct request *clone, int error) */ static void dm_softirq_done(struct request *rq) { + bool mapped = true; struct request *clone = rq->completion_data; struct dm_rq_target_io *tio = clone->end_io_data; - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; - int error = tio->error; - if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) - error = rq_end_io(tio->ti, clone, error, &tio->info); + if (rq->cmd_flags & REQ_FAILED) + mapped = false; - if (error <= 0) - /* The target wants to complete the I/O */ - dm_end_request(clone, error); - else if (error == DM_ENDIO_INCOMPLETE) - /* The target will handle the I/O */ - return; - else if (error == DM_ENDIO_REQUEUE) - /* The target wants to requeue the I/O */ - dm_requeue_unmapped_request(clone); - else { - DMWARN("unimplemented target endio return value: %d", error); - BUG(); - } + dm_done(clone, tio->error, mapped); } /* @@ -882,6 +943,19 @@ static void dm_complete_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. So can't use + * softirq_done with the original. + * Pass the clone to dm_done() directly in this special case. + * It is safe (even if clone->q->queue_lock is held here) + * because there is no I/O dispatching during the completion + * of barrier clone. + */ + dm_done(clone, error, true); + return; + } + tio->error = error; rq->completion_data = clone; blk_complete_request(rq); @@ -898,6 +972,17 @@ void dm_kill_unmapped_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. + * Leave it to dm_end_request(), which handles this special + * case. 
+ */ + BUG_ON(error > 0); + dm_end_request(clone, error); + return; + } + rq->cmd_flags |= REQ_FAILED; dm_complete_request(clone, error); } @@ -1214,7 +1299,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) struct clone_info ci; int error = 0; - ci.map = dm_get_table(md); + ci.map = dm_get_live_table(md); if (unlikely(!ci.map)) { if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) bio_io_error(bio); @@ -1255,7 +1340,7 @@ static int dm_merge_bvec(struct request_queue *q, struct bio_vec *biovec) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; sector_t max_sectors; int max_size = 0; @@ -1352,11 +1437,6 @@ static int dm_make_request(struct request_queue *q, struct bio *bio) { struct mapped_device *md = q->queuedata; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); - return 0; - } - return md->saved_make_request_fn(q, bio); /* call __make_request() */ } @@ -1375,6 +1455,25 @@ static int dm_request(struct request_queue *q, struct bio *bio) return _dm_request(q, bio); } +/* + * Mark this request as flush request, so that dm_request_fn() can + * recognize. + */ +static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq) +{ + rq->cmd_type = REQ_TYPE_LINUX_BLOCK; + rq->cmd[0] = REQ_LB_OP_FLUSH; +} + +static bool dm_rq_is_flush_request(struct request *rq) +{ + if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK && + rq->cmd[0] == REQ_LB_OP_FLUSH) + return true; + else + return false; +} + void dm_dispatch_request(struct request *rq) { int r; @@ -1420,25 +1519,54 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, static int setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio) { - int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, - dm_rq_bio_constructor, tio); + int r; - if (r) - return r; + if (dm_rq_is_flush_request(rq)) { + blk_rq_init(NULL, clone); + clone->cmd_type = REQ_TYPE_FS; + clone->cmd_flags |= (REQ_HARDBARRIER | WRITE); + } else { + r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, + dm_rq_bio_constructor, tio); + if (r) + return r; + + clone->cmd = rq->cmd; + clone->cmd_len = rq->cmd_len; + clone->sense = rq->sense; + clone->buffer = rq->buffer; + } - clone->cmd = rq->cmd; - clone->cmd_len = rq->cmd_len; - clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; return 0; } -static int dm_rq_flush_suspending(struct mapped_device *md) +static struct request *clone_rq(struct request *rq, struct mapped_device *md, + gfp_t gfp_mask) { - return !md->suspend_rq.special; + struct request *clone; + struct dm_rq_target_io *tio; + + tio = alloc_rq_tio(md, gfp_mask); + if (!tio) + return NULL; + + tio->md = md; + tio->ti = NULL; + tio->orig = rq; + tio->error = 0; + memset(&tio->info, 0, sizeof(tio->info)); + + clone = &tio->clone; + if (setup_clone(clone, rq, tio)) { + /* -ENOMEM */ + free_rq_tio(tio); + return NULL; + } + + return clone; } /* @@ -1447,39 +1575,19 @@ static int dm_rq_flush_suspending(struct mapped_device *md) static int dm_prep_fn(struct request_queue *q, struct request *rq) { struct mapped_device *md = q->queuedata; - struct dm_rq_target_io *tio; struct request *clone; - if (unlikely(rq == &md->suspend_rq)) { - if (dm_rq_flush_suspending(md)) - return BLKPREP_OK; - else - /* The flush suspend was interrupted */ - return BLKPREP_KILL; - } + if 
(unlikely(dm_rq_is_flush_request(rq))) + return BLKPREP_OK; if (unlikely(rq->special)) { DMWARN("Already has something in rq->special."); return BLKPREP_KILL; } - tio = alloc_rq_tio(md); /* Only one for each original request */ - if (!tio) - /* -ENOMEM */ - return BLKPREP_DEFER; - - tio->md = md; - tio->ti = NULL; - tio->orig = rq; - tio->error = 0; - memset(&tio->info, 0, sizeof(tio->info)); - - clone = &tio->clone; - if (setup_clone(clone, rq, tio)) { - /* -ENOMEM */ - free_rq_tio(tio); + clone = clone_rq(rq, md, GFP_ATOMIC); + if (!clone) return BLKPREP_DEFER; - } rq->special = clone; rq->cmd_flags |= REQ_DONTPREP; @@ -1487,11 +1595,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) return BLKPREP_OK; } -static void map_request(struct dm_target *ti, struct request *rq, +static void map_request(struct dm_target *ti, struct request *clone, struct mapped_device *md) { int r; - struct request *clone = rq->special; struct dm_rq_target_io *tio = clone->end_io_data; /* @@ -1511,6 +1618,8 @@ static void map_request(struct dm_target *ti, struct request *rq, break; case DM_MAPIO_REMAPPED: /* The target has remapped the I/O so dispatch it */ + trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), + blk_rq_pos(tio->orig)); dm_dispatch_request(clone); break; case DM_MAPIO_REQUEUE: @@ -1536,29 +1645,26 @@ static void map_request(struct dm_target *ti, struct request *rq, static void dm_request_fn(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; - struct request *rq; + struct request *rq, *clone; /* - * For noflush suspend, check blk_queue_stopped() to immediately - * quit I/O dispatching. + * For suspend, check blk_queue_stopped() and increment + * ->pending within a single queue_lock not to increment the + * number of in-flight I/Os after the queue is stopped in + * dm_suspend(). */ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { rq = blk_peek_request(q); if (!rq) goto plug_and_out; - if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */ - if (queue_in_flight(q)) - /* Not quiet yet. 
Wait more */ - goto plug_and_out; - - /* This device should be quiet now */ - __stop_queue(q); + if (unlikely(dm_rq_is_flush_request(rq))) { + BUG_ON(md->flush_request); + md->flush_request = rq; blk_start_request(rq); - __blk_end_request_all(rq, 0); - wake_up(&md->wait); + queue_work(md->wq, &md->barrier_work); goto out; } @@ -1567,8 +1673,11 @@ static void dm_request_fn(struct request_queue *q) goto plug_and_out; blk_start_request(rq); + clone = rq->special; + atomic_inc(&md->pending[rq_data_dir(clone)]); + spin_unlock(q->queue_lock); - map_request(ti, rq, md); + map_request(ti, clone, md); spin_lock_irq(q->queue_lock); } @@ -1595,7 +1704,7 @@ static int dm_lld_busy(struct request_queue *q) { int r; struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) r = 1; @@ -1610,7 +1719,7 @@ static int dm_lld_busy(struct request_queue *q) static void dm_unplug_all(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (map) { if (dm_request_based(md)) @@ -1628,7 +1737,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) struct dm_table *map; if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - map = dm_get_table(md); + map = dm_get_live_table(md); if (map) { /* * Request-based dm cares about only own queue for @@ -1725,6 +1834,7 @@ out: static const struct block_device_operations dm_blk_dops; static void dm_wq_work(struct work_struct *work); +static void dm_rq_barrier_work(struct work_struct *work); /* * Allocate and initialise a blank device with a given minor. @@ -1754,6 +1864,7 @@ static struct mapped_device *alloc_dev(int minor) init_rwsem(&md->io_lock); mutex_init(&md->suspend_lock); spin_lock_init(&md->deferred_lock); + spin_lock_init(&md->barrier_error_lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); @@ -1788,6 +1899,8 @@ static struct mapped_device *alloc_dev(int minor) blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_prep_rq(md->queue, dm_prep_fn); blk_queue_lld_busy(md->queue, dm_lld_busy); + blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH, + dm_rq_prepare_flush); md->disk = alloc_disk(1); if (!md->disk) @@ -1797,6 +1910,7 @@ static struct mapped_device *alloc_dev(int minor) atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); + INIT_WORK(&md->barrier_work, dm_rq_barrier_work); init_waitqueue_head(&md->eventq); md->disk->major = _major; @@ -1921,9 +2035,13 @@ static void __set_size(struct mapped_device *md, sector_t size) mutex_unlock(&md->bdev->bd_inode->i_mutex); } -static int __bind(struct mapped_device *md, struct dm_table *t, - struct queue_limits *limits) +/* + * Returns old map, which caller must destroy. 
+ */ +static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, + struct queue_limits *limits) { + struct dm_table *old_map; struct request_queue *q = md->queue; sector_t size; unsigned long flags; @@ -1938,11 +2056,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __set_size(md, size); - if (!size) { - dm_table_destroy(t); - return 0; - } - dm_table_event_callback(t, event_callback, md); /* @@ -1958,26 +2071,31 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __bind_mempools(md, t); write_lock_irqsave(&md->map_lock, flags); + old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); write_unlock_irqrestore(&md->map_lock, flags); - return 0; + return old_map; } -static void __unbind(struct mapped_device *md) +/* + * Returns unbound table for the caller to free. + */ +static struct dm_table *__unbind(struct mapped_device *md) { struct dm_table *map = md->map; unsigned long flags; if (!map) - return; + return NULL; dm_table_event_callback(map, NULL, NULL); write_lock_irqsave(&md->map_lock, flags); md->map = NULL; write_unlock_irqrestore(&md->map_lock, flags); - dm_table_destroy(map); + + return map; } /* @@ -2059,18 +2177,18 @@ void dm_put(struct mapped_device *md) BUG_ON(test_bit(DMF_FREEING, &md->flags)); if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { - map = dm_get_table(md); + map = dm_get_live_table(md); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - if (!dm_suspended(md)) { + if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } dm_sysfs_exit(md); dm_table_put(map); - __unbind(md); + dm_table_destroy(__unbind(md)); free_dev(md); } } @@ -2080,8 +2198,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) { int r = 0; DECLARE_WAITQUEUE(wait, current); - struct request_queue *q = md->queue; - unsigned long flags; dm_unplug_all(md->queue); @@ -2091,15 +2207,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) set_current_state(interruptible); smp_mb(); - if (dm_request_based(md)) { - spin_lock_irqsave(q->queue_lock, flags); - if (!queue_in_flight(q) && blk_queue_stopped(q)) { - spin_unlock_irqrestore(q->queue_lock, flags); - break; - } - spin_unlock_irqrestore(q->queue_lock, flags); - } else if (!atomic_read(&md->pending[0]) && - !atomic_read(&md->pending[1])) + if (!md_in_flight(md)) break; if (interruptible == TASK_INTERRUPTIBLE && @@ -2194,98 +2302,106 @@ static void dm_queue_flush(struct mapped_device *md) queue_work(md->wq, &md->work); } -/* - * Swap in a new table (destroying old one). - */ -int dm_swap_table(struct mapped_device *md, struct dm_table *table) +static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr) { - struct queue_limits limits; - int r = -EINVAL; + struct dm_rq_target_io *tio = clone->end_io_data; - mutex_lock(&md->suspend_lock); + tio->info.flush_request = flush_nr; +} - /* device must be suspended */ - if (!dm_suspended(md)) - goto out; +/* Issue barrier requests to targets and wait for their completion. 
*/ +static int dm_rq_barrier(struct mapped_device *md) +{ + int i, j; + struct dm_table *map = dm_get_live_table(md); + unsigned num_targets = dm_table_get_num_targets(map); + struct dm_target *ti; + struct request *clone; - r = dm_calculate_queue_limits(table, &limits); - if (r) - goto out; + md->barrier_error = 0; - /* cannot change the device type, once a table is bound */ - if (md->map && - (dm_table_get_type(md->map) != dm_table_get_type(table))) { - DMWARN("can't change the device type after a table is bound"); - goto out; + for (i = 0; i < num_targets; i++) { + ti = dm_table_get_target(map, i); + for (j = 0; j < ti->num_flush_requests; j++) { + clone = clone_rq(md->flush_request, md, GFP_NOIO); + dm_rq_set_flush_nr(clone, j); + atomic_inc(&md->pending[rq_data_dir(clone)]); + map_request(ti, clone, md); + } } - __unbind(md); - r = __bind(md, table, &limits); - -out: - mutex_unlock(&md->suspend_lock); - return r; -} + dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); + dm_table_put(map); -static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) -{ - md->suspend_rq.special = (void *)0x1; + return md->barrier_error; } -static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) +static void dm_rq_barrier_work(struct work_struct *work) { + int error; + struct mapped_device *md = container_of(work, struct mapped_device, + barrier_work); struct request_queue *q = md->queue; + struct request *rq; unsigned long flags; - spin_lock_irqsave(q->queue_lock, flags); - if (!noflush) - dm_rq_invalidate_suspend_marker(md); - __start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); -} + /* + * Hold the md reference here and leave it at the last part so that + * the md can't be deleted by device opener when the barrier request + * completes. + */ + dm_get(md); -static void dm_rq_start_suspend(struct mapped_device *md, int noflush) -{ - struct request *rq = &md->suspend_rq; - struct request_queue *q = md->queue; + error = dm_rq_barrier(md); - if (noflush) - stop_queue(q); - else { - blk_rq_init(q, rq); - blk_insert_request(q, rq, 0, NULL); - } + rq = md->flush_request; + md->flush_request = NULL; + + if (error == DM_ENDIO_REQUEUE) { + spin_lock_irqsave(q->queue_lock, flags); + blk_requeue_request(q, rq); + spin_unlock_irqrestore(q->queue_lock, flags); + } else + blk_end_request_all(rq, error); + + blk_run_queue(q); + + dm_put(md); } -static int dm_rq_suspend_available(struct mapped_device *md, int noflush) +/* + * Swap in a new table, returning the old one for the caller to destroy. + */ +struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { - int r = 1; - struct request *rq = &md->suspend_rq; - struct request_queue *q = md->queue; - unsigned long flags; + struct dm_table *map = ERR_PTR(-EINVAL); + struct queue_limits limits; + int r; - if (noflush) - return r; + mutex_lock(&md->suspend_lock); - /* The marker must be protected by queue lock if it is in use */ - spin_lock_irqsave(q->queue_lock, flags); - if (unlikely(rq->ref_count)) { - /* - * This can happen, when the previous flush suspend was - * interrupted, the marker is still in the queue and - * this flush suspend has been invoked, because we don't - * remove the marker at the time of suspend interruption. - * We have only one marker per mapped_device, so we can't - * start another flush suspend while it is in use. - */ - BUG_ON(!rq->special); /* The marker should be invalidated */ - DMWARN("Invalidating the previous flush suspend is still in" - " progress. 
Please retry later."); - r = 0; + /* device must be suspended */ + if (!dm_suspended_md(md)) + goto out; + + r = dm_calculate_queue_limits(table, &limits); + if (r) { + map = ERR_PTR(r); + goto out; } - spin_unlock_irqrestore(q->queue_lock, flags); - return r; + /* cannot change the device type, once a table is bound */ + if (md->map && + (dm_table_get_type(md->map) != dm_table_get_type(table))) { + DMWARN("can't change the device type after a table is bound"); + goto out; + } + + map = __bind(md, table, &limits); + +out: + mutex_unlock(&md->suspend_lock); + return map; } /* @@ -2330,49 +2446,11 @@ static void unlock_fs(struct mapped_device *md) /* * Suspend mechanism in request-based dm. * - * After the suspend starts, further incoming requests are kept in - * the request_queue and deferred. - * Remaining requests in the request_queue at the start of suspend are flushed - * if it is flush suspend. - * The suspend completes when the following conditions have been satisfied, - * so wait for it: - * 1. q->in_flight is 0 (which means no in_flight request) - * 2. queue has been stopped (which means no request dispatching) - * + * 1. Flush all I/Os by lock_fs() if needed. + * 2. Stop dispatching any I/O by stopping the request_queue. + * 3. Wait for all in-flight I/Os to be completed or requeued. * - * Noflush suspend - * --------------- - * Noflush suspend doesn't need to dispatch remaining requests. - * So stop the queue immediately. Then, wait for all in_flight requests - * to be completed or requeued. - * - * To abort noflush suspend, start the queue. - * - * - * Flush suspend - * ------------- - * Flush suspend needs to dispatch remaining requests. So stop the queue - * after the remaining requests are completed. (Requeued request must be also - * re-dispatched and completed. Until then, we can't stop the queue.) - * - * During flushing the remaining requests, further incoming requests are also - * inserted to the same queue. To distinguish which requests are to be - * flushed, we insert a marker request to the queue at the time of starting - * flush suspend, like a barrier. - * The dispatching is blocked when the marker is found on the top of the queue. - * And the queue is stopped when all in_flight requests are completed, since - * that means the remaining requests are completely flushed. - * Then, the marker is removed from the queue. - * - * To abort flush suspend, we also need to take care of the marker, not only - * starting the queue. - * We don't remove the marker forcibly from the queue since it's against - * the block-layer manner. Instead, we put a invalidated mark on the marker. - * When the invalidated marker is found on the top of the queue, it is - * immediately removed from the queue, so it doesn't block dispatching. - * Because we have only one marker per mapped_device, we can't start another - * flush suspend until the invalidated marker is removed from the queue. - * So fail and return with -EBUSY in such a case. + * To abort suspend, start the request_queue. */ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) { @@ -2383,17 +2461,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) mutex_lock(&md->suspend_lock); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EINVAL; goto out_unlock; } - if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { - r = -EBUSY; - goto out_unlock; - } - - map = dm_get_table(md); + map = dm_get_live_table(md); /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 
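
Editorial sketch. The hunks above change the ownership rules: __bind() returns the previously live table, __unbind() returns the table it detached instead of destroying it, and dm_swap_table() hands that old table back to its caller (dm_put() now wraps __unbind() in dm_table_destroy()). The point is that the destructor runs outside md->map_lock. A minimal user-space sketch of the same "swap under a lock, free outside it" pattern, with hypothetical names (swap_table, struct table) standing in for the dm types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int id; };                       /* stand-in for struct dm_table */

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *live_map;                  /* stand-in for md->map */

/* Swap in a new table; hand the old one back so the caller can
 * destroy it without holding the lock. */
static struct table *swap_table(struct table *new_map)
{
        struct table *old_map;

        pthread_mutex_lock(&map_lock);
        old_map = live_map;
        live_map = new_map;
        pthread_mutex_unlock(&map_lock);

        return old_map;
}

int main(void)
{
        struct table *t1 = malloc(sizeof(*t1));
        struct table *t2 = malloc(sizeof(*t2));
        struct table *old;

        t1->id = 1;
        t2->id = 2;

        old = swap_table(t1);                   /* first bind: old is NULL */
        free(old);                              /* free(NULL) is a no-op */

        old = swap_table(t2);                   /* rebind: t1 comes back */
        printf("destroying old table %d outside the lock\n", old->id);
        free(old);

        free(swap_table(NULL));                 /* unbind: t2 comes back */
        return 0;
}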
@@ -2406,8 +2479,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) dm_table_presuspend_targets(map); /* - * Flush I/O to the device. noflush supersedes do_lockfs, - * because lock_fs() needs to flush I/Os. + * Flush I/O to the device. + * Any I/O submitted after lock_fs() may not be flushed. + * noflush takes precedence over do_lockfs. + * (lock_fs() flushes I/Os and waits for them to complete.) */ if (!noflush && do_lockfs) { r = lock_fs(md); @@ -2436,10 +2511,15 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); up_write(&md->io_lock); - flush_workqueue(md->wq); - + /* + * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which + * can be kicked until md->queue is stopped. So stop md->queue before + * flushing md->wq. + */ if (dm_request_based(md)) - dm_rq_start_suspend(md, noflush); + stop_queue(md->queue); + + flush_workqueue(md->wq); /* * At this point no more requests are entering target request routines. @@ -2458,7 +2538,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) dm_queue_flush(md); if (dm_request_based(md)) - dm_rq_abort_suspend(md, noflush); + start_queue(md->queue); unlock_fs(md); goto out; /* pushback list is already flushed, so skip flush */ @@ -2470,10 +2550,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) * requests are being added to md->deferred list. */ - dm_table_postsuspend_targets(map); - set_bit(DMF_SUSPENDED, &md->flags); + dm_table_postsuspend_targets(map); + out: dm_table_put(map); @@ -2488,10 +2568,10 @@ int dm_resume(struct mapped_device *md) struct dm_table *map = NULL; mutex_lock(&md->suspend_lock); - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) goto out; - map = dm_get_table(md); + map = dm_get_live_table(md); if (!map || !dm_table_get_size(map)) goto out; @@ -2592,18 +2672,29 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) return NULL; if (test_bit(DMF_FREEING, &md->flags) || - test_bit(DMF_DELETING, &md->flags)) + dm_deleting_md(md)) return NULL; dm_get(md); return md; } -int dm_suspended(struct mapped_device *md) +int dm_suspended_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED, &md->flags); } +int dm_suspended(struct dm_target *ti) +{ + struct mapped_device *md = dm_table_get_md(ti->table); + int r = dm_suspended_md(md); + + dm_put(md); + + return r; +} +EXPORT_SYMBOL_GPL(dm_suspended); + int dm_noflush_suspending(struct dm_target *ti) { struct mapped_device *md = dm_table_get_md(ti->table); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a7663eba17e..8dadaa5bc39 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -89,6 +89,16 @@ int dm_target_iterate(void (*iter_func)(struct target_type *tt, int dm_split_args(int *argc, char ***argvp, char *input); /* + * Is this mapped_device being deleted? + */ +int dm_deleting_md(struct mapped_device *md); + +/* + * Is this mapped_device suspended? + */ +int dm_suspended_md(struct mapped_device *md); + +/* * The device-mapper can be driven through one of two interfaces; * ioctl or filesystem, depending which patch you have applied. 
*/ @@ -118,6 +128,9 @@ int dm_lock_for_deletion(struct mapped_device *md); void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie); +int dm_io_init(void); +void dm_io_exit(void); + int dm_kcopyd_init(void); void dm_kcopyd_exit(void); diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 87d88dbb667..713acd02ab3 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -360,6 +360,7 @@ static void raid_exit(void) module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Fault injection personality for MD"); MODULE_ALIAS("md-personality-10"); /* faulty */ MODULE_ALIAS("md-faulty"); MODULE_ALIAS("md-level--5"); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 1ceceb334d5..00435bd2069 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -292,7 +292,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) int cpu; if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); + md_barrier_request(mddev, bio); return 0; } @@ -383,6 +383,7 @@ static void linear_exit (void) module_init(linear_init); module_exit(linear_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Linear device concatenation personality for MD"); MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ MODULE_ALIAS("md-linear"); MODULE_ALIAS("md-level--1"); diff --git a/drivers/md/md.c b/drivers/md/md.c index b182f86a19d..f4f5f82f9f5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -39,11 +39,13 @@ #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/poll.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/hdreg.h> #include <linux/proc_fs.h> #include <linux/random.h> #include <linux/reboot.h> #include <linux/file.h> +#include <linux/compat.h> #include <linux/delay.h> #include <linux/raid/md_p.h> #include <linux/raid/md_u.h> @@ -68,6 +70,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait); #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } /* + * Default number of read corrections we'll attempt on an rdev + * before ejecting it from the array. We divide the read error + * count by 2 for every hour elapsed between read errors. + */ +#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20 +/* * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' * is 1000 KB/sec, so the extra system load does not show up that much. * Increase it if you want to have more _guaranteed_ speed. 
Note that @@ -98,44 +106,40 @@ static struct ctl_table_header *raid_table_header; static ctl_table raid_table[] = { { - .ctl_name = DEV_RAID_SPEED_LIMIT_MIN, .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, .maxlen = sizeof(int), .mode = S_IRUGO|S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { - .ctl_name = DEV_RAID_SPEED_LIMIT_MAX, .procname = "speed_limit_max", .data = &sysctl_speed_limit_max, .maxlen = sizeof(int), .mode = S_IRUGO|S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, - { .ctl_name = 0 } + { } }; static ctl_table raid_dir_table[] = { { - .ctl_name = DEV_RAID, .procname = "raid", .maxlen = 0, .mode = S_IRUGO|S_IXUGO, .child = raid_table, }, - { .ctl_name = 0 } + { } }; static ctl_table raid_root_table[] = { { - .ctl_name = CTL_DEV, .procname = "dev", .maxlen = 0, .mode = 0555, .child = raid_dir_table, }, - { .ctl_name = 0 } + { } }; static const struct block_device_operations md_fops; @@ -217,12 +221,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio) return 0; } rcu_read_lock(); - if (mddev->suspended) { + if (mddev->suspended || mddev->barrier) { DEFINE_WAIT(__wait); for (;;) { prepare_to_wait(&mddev->sb_wait, &__wait, TASK_UNINTERRUPTIBLE); - if (!mddev->suspended) + if (!mddev->suspended && !mddev->barrier) break; rcu_read_unlock(); schedule(); @@ -264,10 +268,110 @@ static void mddev_resume(mddev_t *mddev) int mddev_congested(mddev_t *mddev, int bits) { + if (mddev->barrier) + return 1; return mddev->suspended; } EXPORT_SYMBOL(mddev_congested); +/* + * Generic barrier handling for md + */ + +#define POST_REQUEST_BARRIER ((void*)1) + +static void md_end_barrier(struct bio *bio, int err) +{ + mdk_rdev_t *rdev = bio->bi_private; + mddev_t *mddev = rdev->mddev; + if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER) + set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags); + + rdev_dec_pending(rdev, mddev); + + if (atomic_dec_and_test(&mddev->flush_pending)) { + if (mddev->barrier == POST_REQUEST_BARRIER) { + /* This was a post-request barrier */ + mddev->barrier = NULL; + wake_up(&mddev->sb_wait); + } else + /* The pre-request barrier has finished */ + schedule_work(&mddev->barrier_work); + } + bio_put(bio); +} + +static void submit_barriers(mddev_t *mddev) +{ + mdk_rdev_t *rdev; + + rcu_read_lock(); + list_for_each_entry_rcu(rdev, &mddev->disks, same_set) + if (rdev->raid_disk >= 0 && + !test_bit(Faulty, &rdev->flags)) { + /* Take two references, one is dropped + * when request finishes, one after + * we reclaim rcu_read_lock + */ + struct bio *bi; + atomic_inc(&rdev->nr_pending); + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + bi = bio_alloc(GFP_KERNEL, 0); + bi->bi_end_io = md_end_barrier; + bi->bi_private = rdev; + bi->bi_bdev = rdev->bdev; + atomic_inc(&mddev->flush_pending); + submit_bio(WRITE_BARRIER, bi); + rcu_read_lock(); + rdev_dec_pending(rdev, mddev); + } + rcu_read_unlock(); +} + +static void md_submit_barrier(struct work_struct *ws) +{ + mddev_t *mddev = container_of(ws, mddev_t, barrier_work); + struct bio *bio = mddev->barrier; + + atomic_set(&mddev->flush_pending, 1); + + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) + bio_endio(bio, -EOPNOTSUPP); + else if (bio->bi_size == 0) + /* an empty barrier - all done */ + bio_endio(bio, 0); + else { + bio->bi_rw &= ~(1<<BIO_RW_BARRIER); + if (mddev->pers->make_request(mddev->queue, bio)) + generic_make_request(bio); + mddev->barrier = POST_REQUEST_BARRIER; + submit_barriers(mddev); + } + if 
(atomic_dec_and_test(&mddev->flush_pending)) { + mddev->barrier = NULL; + wake_up(&mddev->sb_wait); + } +} + +void md_barrier_request(mddev_t *mddev, struct bio *bio) +{ + spin_lock_irq(&mddev->write_lock); + wait_event_lock_irq(mddev->sb_wait, + !mddev->barrier, + mddev->write_lock, /*nothing*/); + mddev->barrier = bio; + spin_unlock_irq(&mddev->write_lock); + + atomic_set(&mddev->flush_pending, 1); + INIT_WORK(&mddev->barrier_work, md_submit_barrier); + + submit_barriers(mddev); + + if (atomic_dec_and_test(&mddev->flush_pending)) + schedule_work(&mddev->barrier_work); +} +EXPORT_SYMBOL(md_barrier_request); static inline mddev_t *mddev_get(mddev_t *mddev) { @@ -367,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit) mutex_init(&new->open_mutex); mutex_init(&new->reconfig_mutex); + mutex_init(&new->bitmap_info.mutex); INIT_LIST_HEAD(&new->disks); INIT_LIST_HEAD(&new->all_mddevs); init_timer(&new->safemode_timer); @@ -374,6 +479,7 @@ static mddev_t * mddev_find(dev_t unit) atomic_set(&new->openers, 0); atomic_set(&new->active_io, 0); spin_lock_init(&new->write_lock); + atomic_set(&new->flush_pending, 0); init_waitqueue_head(&new->sb_wait); init_waitqueue_head(&new->recovery_wait); new->reshape_position = MaxSector; @@ -752,7 +858,7 @@ struct super_type { */ int md_check_no_bitmap(mddev_t *mddev) { - if (!mddev->bitmap_file && !mddev->bitmap_offset) + if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) return 0; printk(KERN_ERR "%s: bitmaps are not supported for %s\n", mdname(mddev), mddev->pers->name); @@ -880,8 +986,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->raid_disks = sb->raid_disks; mddev->dev_sectors = sb->size * 2; mddev->events = ev1; - mddev->bitmap_offset = 0; - mddev->default_bitmap_offset = MD_SB_BYTES >> 9; + mddev->bitmap_info.offset = 0; + mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; if (mddev->minor_version >= 91) { mddev->reshape_position = sb->reshape_position; @@ -915,8 +1021,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->max_disks = MD_SB_DISKS; if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && - mddev->bitmap_file == NULL) - mddev->bitmap_offset = mddev->default_bitmap_offset; + mddev->bitmap_info.file == NULL) + mddev->bitmap_info.offset = + mddev->bitmap_info.default_offset; } else if (mddev->pers == NULL) { /* Insist on good event counter while assembling */ @@ -1033,7 +1140,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->layout = mddev->layout; sb->chunk_size = mddev->chunk_sectors << 9; - if (mddev->bitmap && mddev->bitmap_file == NULL) + if (mddev->bitmap && mddev->bitmap_info.file == NULL) sb->state |= (1<<MD_SB_BITMAP_PRESENT); sb->disks[0].state = (1<<MD_DISK_REMOVED); @@ -1111,7 +1218,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) { if (num_sectors && num_sectors < rdev->mddev->dev_sectors) return 0; /* component must fit device */ - if (rdev->mddev->bitmap_offset) + if (rdev->mddev->bitmap_info.offset) return 0; /* can't move bitmap */ rdev->sb_start = calc_dev_sboffset(rdev->bdev); if (!num_sectors || num_sectors > rdev->sb_start) @@ -1290,8 +1397,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->raid_disks = le32_to_cpu(sb->raid_disks); mddev->dev_sectors = le64_to_cpu(sb->size); mddev->events = ev1; - mddev->bitmap_offset = 0; - mddev->default_bitmap_offset = 1024 >> 9; + mddev->bitmap_info.offset = 0; + mddev->bitmap_info.default_offset = 1024 >> 9; mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 
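
Editorial sketch. The generic barrier code added to md.c above (md_barrier_request, submit_barriers, md_end_barrier, md_submit_barrier) runs in two flush phases and tracks outstanding per-device flushes in mddev->flush_pending, which is initialised to 1 so that a completion racing ahead of submission can never drive the count to zero; only after the submitter drops its own reference does the final completion schedule the next step. A purely sequential user-space model of that counting idiom (hypothetical names, a fixed device count, completions modelled as ordinary function calls rather than bio end_io callbacks):

#include <stdio.h>

#define NUM_DEVICES 4                           /* hypothetical member-disk count */

static int flush_pending;

static void complete_one_flush(const char *phase)
{
        if (--flush_pending == 0)
                printf("%s: all flushes finished, move on\n", phase);
}

static void flush_all_devices(const char *phase)
{
        int i;

        flush_pending = 1;                      /* submitter's bias */
        for (i = 0; i < NUM_DEVICES; i++) {
                flush_pending++;                /* one per submitted flush */
                printf("%s: empty barrier sent to device %d\n", phase, i);
        }
        complete_one_flush(phase);              /* submitter drops its bias */
        for (i = 0; i < NUM_DEVICES; i++)
                complete_one_flush(phase);      /* completions, sequential here */
}

int main(void)
{
        flush_all_devices("pre-barrier");
        printf("payload resubmitted without the barrier flag\n");
        flush_all_devices("post-barrier");
        printf("barrier complete, waiters woken\n");
        return 0;
}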
memcpy(mddev->uuid, sb->set_uuid, 16); @@ -1299,8 +1406,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->max_disks = (4096-256)/2; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && - mddev->bitmap_file == NULL ) - mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); + mddev->bitmap_info.file == NULL ) + mddev->bitmap_info.offset = + (__s32)le32_to_cpu(sb->bitmap_offset); if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { mddev->reshape_position = le64_to_cpu(sb->reshape_position); @@ -1394,19 +1502,17 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); - if (mddev->bitmap && mddev->bitmap_file == NULL) { - sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); + if (mddev->bitmap && mddev->bitmap_info.file == NULL) { + sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); } if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags)) { - if (rdev->recovery_offset > 0) { - sb->feature_map |= - cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); - sb->recovery_offset = - cpu_to_le64(rdev->recovery_offset); - } + sb->feature_map |= + cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); + sb->recovery_offset = + cpu_to_le64(rdev->recovery_offset); } if (mddev->reshape_position != MaxSector) { @@ -1440,7 +1546,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->dev_roles[i] = cpu_to_le16(0xfffe); else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); - else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0) + else if (rdev2->raid_disk >= 0) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(0xffff); @@ -1462,7 +1568,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) max_sectors -= rdev->data_offset; if (!num_sectors || num_sectors > max_sectors) num_sectors = max_sectors; - } else if (rdev->mddev->bitmap_offset) { + } else if (rdev->mddev->bitmap_info.offset) { /* minor version 0 with bitmap we can't move */ return 0; } else { @@ -1830,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb) uuid = sb->set_uuid; printk(KERN_INFO - "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" - ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" + "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" "md: Name: \"%s\" CT:%llu\n", le32_to_cpu(sb->major_version), le32_to_cpu(sb->feature_map), - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15], + uuid, sb->set_name, (unsigned long long)le64_to_cpu(sb->ctime) & MD_SUPERBLOCK_1_TIME_SEC_MASK); @@ -1847,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" " RO:%llu\n" - "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" - ":%02x%02x%02x%02x%02x%02x\n" + "md: Dev:%08x UUID: %pU\n" "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" "md: (MaxDev:%u) \n", le32_to_cpu(sb->level), @@ -1861,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) (unsigned long long)le64_to_cpu(sb->super_offset), (unsigned long long)le64_to_cpu(sb->recovery_offset), le32_to_cpu(sb->dev_number), - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], 
uuid[13], uuid[14], uuid[15], + uuid, sb->devflags, (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, (unsigned long long)le64_to_cpu(sb->events), @@ -2446,12 +2544,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) static struct rdev_sysfs_entry rdev_size = __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); + +static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) +{ + unsigned long long recovery_start = rdev->recovery_offset; + + if (test_bit(In_sync, &rdev->flags) || + recovery_start == MaxSector) + return sprintf(page, "none\n"); + + return sprintf(page, "%llu\n", recovery_start); +} + +static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) +{ + unsigned long long recovery_start; + + if (cmd_match(buf, "none")) + recovery_start = MaxSector; + else if (strict_strtoull(buf, 10, &recovery_start)) + return -EINVAL; + + if (rdev->mddev->pers && + rdev->raid_disk >= 0) + return -EBUSY; + + rdev->recovery_offset = recovery_start; + if (recovery_start == MaxSector) + set_bit(In_sync, &rdev->flags); + else + clear_bit(In_sync, &rdev->flags); + return len; +} + +static struct rdev_sysfs_entry rdev_recovery_start = +__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_errors.attr, &rdev_slot.attr, &rdev_offset.attr, &rdev_size.attr, + &rdev_recovery_start.attr, NULL, }; static ssize_t @@ -2553,6 +2688,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi rdev->flags = 0; rdev->data_offset = 0; rdev->sb_events = 0; + rdev->last_read_error.tv_sec = 0; + rdev->last_read_error.tv_nsec = 0; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); atomic_set(&rdev->corrected_errors, 0); @@ -2663,6 +2800,47 @@ static void analyze_sbs(mddev_t * mddev) } } +/* Read a fixed-point number. + * Numbers in sysfs attributes should be in "standard" units where + * possible, so time should be in seconds. + * However we internally use a a much smaller unit such as + * milliseconds or jiffies. + * This function takes a decimal number with a possible fractional + * component, and produces an integer which is the result of + * multiplying that number by 10^'scale'. + * all without any floating-point arithmetic. + */ +int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) +{ + unsigned long result = 0; + long decimals = -1; + while (isdigit(*cp) || (*cp == '.' 
&& decimals < 0)) { + if (*cp == '.') + decimals = 0; + else if (decimals < scale) { + unsigned int value; + value = *cp - '0'; + result = result * 10 + value; + if (decimals >= 0) + decimals++; + } + cp++; + } + if (*cp == '\n') + cp++; + if (*cp) + return -EINVAL; + if (decimals < 0) + decimals = 0; + while (decimals < scale) { + result *= 10; + decimals ++; + } + *res = result; + return 0; +} + + static void md_safemode_timeout(unsigned long data); static ssize_t @@ -2674,31 +2852,10 @@ safe_delay_show(mddev_t *mddev, char *page) static ssize_t safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) { - int scale=1; - int dot=0; - int i; unsigned long msec; - char buf[30]; - /* remove a period, and count digits after it */ - if (len >= sizeof(buf)) - return -EINVAL; - strlcpy(buf, cbuf, sizeof(buf)); - for (i=0; i<len; i++) { - if (dot) { - if (isdigit(buf[i])) { - buf[i-1] = buf[i]; - scale *= 10; - } - buf[i] = 0; - } else if (buf[i] == '.') { - dot=1; - buf[i] = 0; - } - } - if (strict_strtoul(buf, 10, &msec) < 0) + if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) return -EINVAL; - msec = (msec * 1000) / scale; if (msec == 0) mddev->safemode_delay = 0; else { @@ -2974,7 +3131,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len) if (mddev->pers) return -EBUSY; - if (!*buf || (*e && *e != '\n')) + if (cmd_match(buf, "none")) + n = MaxSector; + else if (!*buf || (*e && *e != '\n')) return -EINVAL; mddev->recovery_cp = n; @@ -3170,6 +3329,29 @@ static struct md_sysfs_entry md_array_state = __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); static ssize_t +max_corrected_read_errors_show(mddev_t *mddev, char *page) { + return sprintf(page, "%d\n", + atomic_read(&mddev->max_corr_read_errors)); +} + +static ssize_t +max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + + if (*buf && (*e == 0 || *e == '\n')) { + atomic_set(&mddev->max_corr_read_errors, n); + return len; + } + return -EINVAL; +} + +static struct md_sysfs_entry max_corr_read_errors = +__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, + max_corrected_read_errors_store); + +static ssize_t null_show(mddev_t *mddev, char *page) { return -EINVAL; @@ -3250,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len) } if (*end && !isspace(*end)) break; bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); - buf = end; - while (isspace(*buf)) buf++; + buf = skip_spaces(end); } bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ out: @@ -3794,6 +3975,7 @@ static struct attribute *md_default_attrs[] = { &md_array_state.attr, &md_reshape_position.attr, &md_array_size.attr, + &max_corr_read_errors.attr, NULL, }; @@ -3898,6 +4080,7 @@ static void mddev_delayed_delete(struct work_struct *ws) mddev->sysfs_action = NULL; mddev->private = NULL; } + sysfs_remove_group(&mddev->kobj, &md_bitmap_group); kobject_del(&mddev->kobj); kobject_put(&mddev->kobj); } @@ -3989,6 +4172,8 @@ static int md_alloc(dev_t dev, char *name) disk->disk_name); error = 0; } + if (sysfs_create_group(&mddev->kobj, &md_bitmap_group)) + printk(KERN_DEBUG "pointless warning\n"); abort: mutex_unlock(&disks_mutex); if (!error) { @@ -4210,6 +4395,8 @@ static int do_md_run(mddev_t * mddev) mddev->ro = 0; atomic_set(&mddev->writes_pending,0); + atomic_set(&mddev->max_corr_read_errors, + MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); mddev->safemode = 0; mddev->safemode_timer.function = md_safemode_timeout; 
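
Editorial sketch. strict_strtoul_scaled() above turns a decimal string with an optional fractional part into an integer multiplied by 10^scale, so safe_delay_store() can convert "0.35" seconds into 350 milliseconds without floating point. A self-contained user-space copy of the same logic under the hypothetical name parse_scaled(), for experimenting with the conversion:

#include <ctype.h>
#include <stdio.h>

/* Read "digits[.digits]" and return the value multiplied by 10^scale,
 * truncating extra fractional digits, without floating point. */
static int parse_scaled(const char *cp, unsigned long *res, int scale)
{
        unsigned long result = 0;
        long decimals = -1;

        while (isdigit((unsigned char)*cp) || (*cp == '.' && decimals < 0)) {
                if (*cp == '.')
                        decimals = 0;
                else if (decimals < scale) {
                        result = result * 10 + (unsigned int)(*cp - '0');
                        if (decimals >= 0)
                                decimals++;
                }
                cp++;
        }
        if (*cp == '\n')
                cp++;
        if (*cp)
                return -1;                      /* trailing garbage */
        if (decimals < 0)
                decimals = 0;
        while (decimals < scale) {
                result *= 10;
                decimals++;
        }
        *res = result;
        return 0;
}

int main(void)
{
        unsigned long msec;

        /* "0.35" seconds scaled by 10^3 gives 350 milliseconds, which is
         * what safe_delay_store() now feeds into safemode_delay. */
        if (parse_scaled("0.35\n", &msec, 3) == 0)
                printf("0.35 s -> %lu ms\n", msec);
        return 0;
}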
mddev->safemode_timer.data = (unsigned long) mddev; @@ -4314,7 +4501,7 @@ static int deny_bitmap_write_access(struct file * file) return 0; } -static void restore_bitmap_write_access(struct file *file) +void restore_bitmap_write_access(struct file *file) { struct inode *inode = file->f_mapping->host; @@ -4409,12 +4596,12 @@ out: printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); bitmap_destroy(mddev); - if (mddev->bitmap_file) { - restore_bitmap_write_access(mddev->bitmap_file); - fput(mddev->bitmap_file); - mddev->bitmap_file = NULL; + if (mddev->bitmap_info.file) { + restore_bitmap_write_access(mddev->bitmap_info.file); + fput(mddev->bitmap_info.file); + mddev->bitmap_info.file = NULL; } - mddev->bitmap_offset = 0; + mddev->bitmap_info.offset = 0; /* make sure all md_delayed_delete calls have finished */ flush_scheduled_work(); @@ -4455,6 +4642,11 @@ out: mddev->degraded = 0; mddev->barriers_work = 0; mddev->safemode = 0; + mddev->bitmap_info.offset = 0; + mddev->bitmap_info.default_offset = 0; + mddev->bitmap_info.chunksize = 0; + mddev->bitmap_info.daemon_sleep = 0; + mddev->bitmap_info.max_write_behind = 0; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); if (mddev->hold_active == UNTIL_STOP) mddev->hold_active = 0; @@ -4640,7 +4832,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) info.state = 0; if (mddev->in_sync) info.state = (1<<MD_SB_CLEAN); - if (mddev->bitmap && mddev->bitmap_offset) + if (mddev->bitmap && mddev->bitmap_info.offset) info.state = (1<<MD_SB_BITMAP_PRESENT); info.active_disks = insync; info.working_disks = working; @@ -4998,23 +5190,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd) if (fd >= 0) { if (mddev->bitmap) return -EEXIST; /* cannot add when bitmap is present */ - mddev->bitmap_file = fget(fd); + mddev->bitmap_info.file = fget(fd); - if (mddev->bitmap_file == NULL) { + if (mddev->bitmap_info.file == NULL) { printk(KERN_ERR "%s: error: failed to get bitmap file\n", mdname(mddev)); return -EBADF; } - err = deny_bitmap_write_access(mddev->bitmap_file); + err = deny_bitmap_write_access(mddev->bitmap_info.file); if (err) { printk(KERN_ERR "%s: error: bitmap file is already in use\n", mdname(mddev)); - fput(mddev->bitmap_file); - mddev->bitmap_file = NULL; + fput(mddev->bitmap_info.file); + mddev->bitmap_info.file = NULL; return err; } - mddev->bitmap_offset = 0; /* file overrides offset */ + mddev->bitmap_info.offset = 0; /* file overrides offset */ } else if (mddev->bitmap == NULL) return -ENOENT; /* cannot remove what isn't there */ err = 0; @@ -5029,11 +5221,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd) mddev->pers->quiesce(mddev, 0); } if (fd < 0) { - if (mddev->bitmap_file) { - restore_bitmap_write_access(mddev->bitmap_file); - fput(mddev->bitmap_file); + if (mddev->bitmap_info.file) { + restore_bitmap_write_access(mddev->bitmap_info.file); + fput(mddev->bitmap_info.file); } - mddev->bitmap_file = NULL; + mddev->bitmap_info.file = NULL; } return err; @@ -5100,8 +5292,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) mddev->flags = 0; set_bit(MD_CHANGE_DEVS, &mddev->flags); - mddev->default_bitmap_offset = MD_SB_BYTES >> 9; - mddev->bitmap_offset = 0; + mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; + mddev->bitmap_info.offset = 0; mddev->reshape_position = MaxSector; @@ -5201,7 +5393,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) int state = 0; /* calculate expected state,ignoring low bits */ - if (mddev->bitmap && mddev->bitmap_offset) + if 
(mddev->bitmap && mddev->bitmap_info.offset) state |= (1 << MD_SB_BITMAP_PRESENT); if (mddev->major_version != info->major_version || @@ -5260,9 +5452,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) /* add the bitmap */ if (mddev->bitmap) return -EEXIST; - if (mddev->default_bitmap_offset == 0) + if (mddev->bitmap_info.default_offset == 0) return -EINVAL; - mddev->bitmap_offset = mddev->default_bitmap_offset; + mddev->bitmap_info.offset = + mddev->bitmap_info.default_offset; mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); if (rv) @@ -5277,7 +5470,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) mddev->pers->quiesce(mddev, 1); bitmap_destroy(mddev); mddev->pers->quiesce(mddev, 0); - mddev->bitmap_offset = 0; + mddev->bitmap_info.offset = 0; } } md_update_sb(mddev, 1); @@ -5528,6 +5721,25 @@ done: abort: return err; } +#ifdef CONFIG_COMPAT +static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case HOT_REMOVE_DISK: + case HOT_ADD_DISK: + case SET_DISK_FAULTY: + case SET_BITMAP_FILE: + /* These take in integer arg, do not convert */ + break; + default: + arg = (unsigned long)compat_ptr(arg); + break; + } + + return md_ioctl(bdev, mode, cmd, arg); +} +#endif /* CONFIG_COMPAT */ static int md_open(struct block_device *bdev, fmode_t mode) { @@ -5593,6 +5805,9 @@ static const struct block_device_operations md_fops = .open = md_open, .release = md_release, .ioctl = md_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = md_compat_ioctl, +#endif .getgeo = md_getgeo, .media_changed = md_media_changed, .revalidate_disk= md_revalidate, @@ -5986,14 +6201,14 @@ static int md_seq_show(struct seq_file *seq, void *v) unsigned long chunk_kb; unsigned long flags; spin_lock_irqsave(&bitmap->lock, flags); - chunk_kb = bitmap->chunksize >> 10; + chunk_kb = mddev->bitmap_info.chunksize >> 10; seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " "%lu%s chunk", bitmap->pages - bitmap->missing_pages, bitmap->pages, (bitmap->pages - bitmap->missing_pages) << (PAGE_SHIFT - 10), - chunk_kb ? chunk_kb : bitmap->chunksize, + chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, chunk_kb ? 
"KB" : "B"); if (bitmap->file) { seq_printf(seq, ", file: "); @@ -6342,12 +6557,14 @@ void md_do_sync(mddev_t *mddev) /* recovery follows the physical size of devices */ max_sectors = mddev->dev_sectors; j = MaxSector; - list_for_each_entry(rdev, &mddev->disks, same_set) + rcu_read_lock(); + list_for_each_entry_rcu(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && rdev->recovery_offset < j) j = rdev->recovery_offset; + rcu_read_unlock(); } printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); @@ -6384,6 +6601,7 @@ void md_do_sync(mddev_t *mddev) desc, mdname(mddev)); mddev->curr_resync = j; } + mddev->curr_resync_completed = mddev->curr_resync; while (j < max_sectors) { sector_t sectors; @@ -6516,22 +6734,29 @@ void md_do_sync(mddev_t *mddev) } else { if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev->curr_resync = MaxSector; - list_for_each_entry(rdev, &mddev->disks, same_set) + rcu_read_lock(); + list_for_each_entry_rcu(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && rdev->recovery_offset < mddev->curr_resync) rdev->recovery_offset = mddev->curr_resync; + rcu_read_unlock(); } } set_bit(MD_CHANGE_DEVS, &mddev->flags); skip: + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + /* We completed so min/max setting can be forgotten if used. */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + mddev->resync_min = 0; + mddev->resync_max = MaxSector; + } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + mddev->resync_min = mddev->curr_resync_completed; mddev->curr_resync = 0; - mddev->curr_resync_completed = 0; if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) - /* We completed so max setting can be forgotten. */ - mddev->resync_max = MaxSector; + mddev->curr_resync_completed = 0; sysfs_notify(&mddev->kobj, NULL, "sync_completed"); wake_up(&resync_wait); set_bit(MD_RECOVERY_DONE, &mddev->recovery); @@ -6594,6 +6819,7 @@ static int remove_and_add_spares(mddev_t *mddev) nm, mdname(mddev)); spares++; md_new_event(mddev); + set_bit(MD_CHANGE_DEVS, &mddev->flags); } else break; } @@ -6629,7 +6855,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev->bitmap) - bitmap_daemon_work(mddev->bitmap); + bitmap_daemon_work(mddev); if (mddev->ro) return; @@ -6999,5 +7225,6 @@ EXPORT_SYMBOL(md_unregister_thread); EXPORT_SYMBOL(md_wakeup_thread); EXPORT_SYMBOL(md_check_recovery); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MD RAID framework"); MODULE_ALIAS("md"); MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); diff --git a/drivers/md/md.h b/drivers/md/md.h index f184b69ef33..8e4c75c00d4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -97,6 +97,9 @@ struct mdk_rdev_s atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ + struct timespec last_read_error; /* monotonic time since our + * last read error + */ atomic_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. @@ -280,17 +283,38 @@ struct mddev_s unsigned int max_write_behind; /* 0 = sync */ struct bitmap *bitmap; /* the bitmap for the device */ - struct file *bitmap_file; /* the bitmap file */ - long bitmap_offset; /* offset from superblock of - * start of bitmap. May be - * negative, but not '0' - */ - long default_bitmap_offset; /* this is the offset to use when - * hot-adding a bitmap. It should - * eventually be settable by sysfs. 
- */ - + struct { + struct file *file; /* the bitmap file */ + loff_t offset; /* offset from superblock of + * start of bitmap. May be + * negative, but not '0' + * For external metadata, offset + * from start of device. + */ + loff_t default_offset; /* this is the offset to use when + * hot-adding a bitmap. It should + * eventually be settable by sysfs. + */ + struct mutex mutex; + unsigned long chunksize; + unsigned long daemon_sleep; /* how many seconds between updates? */ + unsigned long max_write_behind; /* write-behind mode */ + int external; + } bitmap_info; + + atomic_t max_corr_read_errors; /* max read retries */ struct list_head all_mddevs; + + /* Generic barrier handling. + * If there is a pending barrier request, all other + * writes are blocked while the devices are flushed. + * The last to finish a flush schedules a worker to + * submit the barrier request (without the barrier flag), + * then submit more flush requests. + */ + struct bio *barrier; + atomic_t flush_pending; + struct work_struct barrier_work; }; @@ -353,7 +377,7 @@ struct md_sysfs_entry { ssize_t (*show)(mddev_t *, char *); ssize_t (*store)(mddev_t *, const char *, size_t); }; - +extern struct attribute_group md_bitmap_group; static inline char * mdname (mddev_t * mddev) { @@ -431,6 +455,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); extern int mddev_congested(mddev_t *mddev, int bits); +extern void md_barrier_request(mddev_t *mddev, struct bio *bio); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); @@ -443,6 +468,8 @@ extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); extern int md_check_no_bitmap(mddev_t *mddev); extern int md_integrity_register(mddev_t *mddev); -void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); +extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); +extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); +extern void restore_bitmap_write_access(struct file *file); #endif /* _MD_MD_H */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index ee7646f974a..32a662fc55c 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -145,7 +145,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio) int cpu; if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); + md_barrier_request(mddev, bio); return 0; } @@ -581,6 +581,7 @@ static void __exit multipath_exit (void) module_init(multipath_init); module_exit(multipath_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("simple multi-path personality for MD"); MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ MODULE_ALIAS("md-multipath"); MODULE_ALIAS("md-level--4"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d3a4ce06015..77605cdceaf 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio) int cpu; if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); + md_barrier_request(mddev, bio); return 0; } @@ -567,6 +567,7 @@ static void raid0_exit (void) module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAID0 (striping) personality for MD"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ 
MODULE_ALIAS("md-raid0"); MODULE_ALIAS("md-level-0"); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e07ce2e033a..859bd3ffe43 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -677,6 +677,7 @@ static void raise_barrier(conf_t *conf) static void lower_barrier(conf_t *conf) { unsigned long flags; + BUG_ON(conf->barrier <= 0); spin_lock_irqsave(&conf->resync_lock, flags); conf->barrier--; spin_unlock_irqrestore(&conf->resync_lock, flags); @@ -801,6 +802,25 @@ static int make_request(struct request_queue *q, struct bio * bio) md_write_start(mddev, bio); /* wait on superblock update early */ + if (bio_data_dir(bio) == WRITE && + bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo && + bio->bi_sector < mddev->suspend_hi) { + /* As the suspend_* range is controlled by + * userspace, we want an interruptible + * wait. + */ + DEFINE_WAIT(w); + for (;;) { + flush_signals(current); + prepare_to_wait(&conf->wait_barrier, + &w, TASK_INTERRUPTIBLE); + if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo || + bio->bi_sector >= mddev->suspend_hi) + break; + schedule(); + } + finish_wait(&conf->wait_barrier, &w); + } if (unlikely(!mddev->barriers_work && bio_rw_flagged(bio, BIO_RW_BARRIER))) { if (rw == WRITE) @@ -923,7 +943,8 @@ static int make_request(struct request_queue *q, struct bio * bio) /* do behind I/O ? */ if (bitmap && - atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind && + (atomic_read(&bitmap->behind_writes) + < mddev->bitmap_info.max_write_behind) && (behind_pages = alloc_behind_pages(bio)) != NULL) set_bit(R1BIO_BehindIO, &r1_bio->state); @@ -1941,74 +1962,48 @@ static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) return mddev->dev_sectors; } -static int run(mddev_t *mddev) +static conf_t *setup_conf(mddev_t *mddev) { conf_t *conf; - int i, j, disk_idx; + int i; mirror_info_t *disk; mdk_rdev_t *rdev; + int err = -ENOMEM; - if (mddev->level != 1) { - printk("raid1: %s: raid level not set to mirroring (%d)\n", - mdname(mddev), mddev->level); - goto out; - } - if (mddev->reshape_position != MaxSector) { - printk("raid1: %s: reshape_position set but not supported\n", - mdname(mddev)); - goto out; - } - /* - * copy the already verified devices into our private RAID1 - * bookkeeping area. 
[whatever we allocate in run(), - * should be freed in stop()] - */ conf = kzalloc(sizeof(conf_t), GFP_KERNEL); - mddev->private = conf; if (!conf) - goto out_no_mem; + goto abort; conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks, GFP_KERNEL); if (!conf->mirrors) - goto out_no_mem; + goto abort; conf->tmppage = alloc_page(GFP_KERNEL); if (!conf->tmppage) - goto out_no_mem; + goto abort; - conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); + conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); if (!conf->poolinfo) - goto out_no_mem; - conf->poolinfo->mddev = NULL; + goto abort; conf->poolinfo->raid_disks = mddev->raid_disks; conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, r1bio_pool_free, conf->poolinfo); if (!conf->r1bio_pool) - goto out_no_mem; + goto abort; + conf->poolinfo->mddev = mddev; spin_lock_init(&conf->device_lock); - mddev->queue->queue_lock = &conf->device_lock; - list_for_each_entry(rdev, &mddev->disks, same_set) { - disk_idx = rdev->raid_disk; + int disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) continue; disk = conf->mirrors + disk_idx; disk->rdev = rdev; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. - */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->head_position = 0; } @@ -2022,8 +2017,7 @@ static int run(mddev_t *mddev) bio_list_init(&conf->pending_bio_list); bio_list_init(&conf->flushing_bio_list); - - mddev->degraded = 0; + conf->last_used = -1; for (i = 0; i < conf->raid_disks; i++) { disk = conf->mirrors + i; @@ -2031,38 +2025,97 @@ static int run(mddev_t *mddev) if (!disk->rdev || !test_bit(In_sync, &disk->rdev->flags)) { disk->head_position = 0; - mddev->degraded++; if (disk->rdev) conf->fullsync = 1; - } + } else if (conf->last_used < 0) + /* + * The first working device is used as a + * starting point to read balancing. + */ + conf->last_used = i; } - if (mddev->degraded == conf->raid_disks) { + + err = -EIO; + if (conf->last_used < 0) { printk(KERN_ERR "raid1: no operational mirrors for %s\n", - mdname(mddev)); - goto out_free_conf; + mdname(mddev)); + goto abort; } - if (conf->raid_disks - mddev->degraded == 1) - mddev->recovery_cp = MaxSector; + err = -ENOMEM; + conf->thread = md_register_thread(raid1d, mddev, NULL); + if (!conf->thread) { + printk(KERN_ERR + "raid1: couldn't allocate thread for %s\n", + mdname(mddev)); + goto abort; + } + + return conf; + + abort: + if (conf) { + if (conf->r1bio_pool) + mempool_destroy(conf->r1bio_pool); + kfree(conf->mirrors); + safe_put_page(conf->tmppage); + kfree(conf->poolinfo); + kfree(conf); + } + return ERR_PTR(err); +} +static int run(mddev_t *mddev) +{ + conf_t *conf; + int i; + mdk_rdev_t *rdev; + + if (mddev->level != 1) { + printk("raid1: %s: raid level not set to mirroring (%d)\n", + mdname(mddev), mddev->level); + return -EIO; + } + if (mddev->reshape_position != MaxSector) { + printk("raid1: %s: reshape_position set but not supported\n", + mdname(mddev)); + return -EIO; + } /* - * find the first working one and use it as a starting point - * to read balancing. + * copy the already verified devices into our private RAID1 + * bookkeeping area. 
[whatever we allocate in run(), + * should be freed in stop()] */ - for (j = 0; j < conf->raid_disks && - (!conf->mirrors[j].rdev || - !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) - /* nothing */; - conf->last_used = j; + if (mddev->private == NULL) + conf = setup_conf(mddev); + else + conf = mddev->private; + if (IS_ERR(conf)) + return PTR_ERR(conf); - mddev->thread = md_register_thread(raid1d, mddev, NULL); - if (!mddev->thread) { - printk(KERN_ERR - "raid1: couldn't allocate thread for %s\n", - mdname(mddev)); - goto out_free_conf; + mddev->queue->queue_lock = &conf->device_lock; + list_for_each_entry(rdev, &mddev->disks, same_set) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + /* as we don't honour merge_bvec_fn, we must never risk + * violating it, so limit ->max_sector to one PAGE, as + * a one page request is never in violation. + */ + if (rdev->bdev->bd_disk->queue->merge_bvec_fn && + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); } + mddev->degraded = 0; + for (i=0; i < conf->raid_disks; i++) + if (conf->mirrors[i].rdev == NULL || + !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || + test_bit(Faulty, &conf->mirrors[i].rdev->flags)) + mddev->degraded++; + + if (conf->raid_disks - mddev->degraded == 1) + mddev->recovery_cp = MaxSector; + if (mddev->recovery_cp != MaxSector) printk(KERN_NOTICE "raid1: %s is not clean" " -- starting background reconstruction\n", @@ -2071,9 +2124,14 @@ static int run(mddev_t *mddev) "raid1: raid set %s active with %d out of %d mirrors\n", mdname(mddev), mddev->raid_disks - mddev->degraded, mddev->raid_disks); + /* * Ok, everything is just fine now */ + mddev->thread = conf->thread; + conf->thread = NULL; + mddev->private = conf; + md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); mddev->queue->unplug_fn = raid1_unplug; @@ -2081,23 +2139,6 @@ static int run(mddev_t *mddev) mddev->queue->backing_dev_info.congested_data = mddev; md_integrity_register(mddev); return 0; - -out_no_mem: - printk(KERN_ERR "raid1: couldn't allocate memory for %s\n", - mdname(mddev)); - -out_free_conf: - if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); - kfree(conf->mirrors); - safe_put_page(conf->tmppage); - kfree(conf->poolinfo); - kfree(conf); - mddev->private = NULL; - } -out: - return -EIO; } static int stop(mddev_t *mddev) @@ -2271,6 +2312,9 @@ static void raid1_quiesce(mddev_t *mddev, int state) conf_t *conf = mddev->private; switch(state) { + case 2: /* wake for suspend */ + wake_up(&conf->wait_barrier); + break; case 1: raise_barrier(conf); break; @@ -2280,6 +2324,23 @@ static void raid1_quiesce(mddev_t *mddev, int state) } } +static void *raid1_takeover(mddev_t *mddev) +{ + /* raid1 can take over: + * raid5 with 2 devices, any layout or chunk size + */ + if (mddev->level == 5 && mddev->raid_disks == 2) { + conf_t *conf; + mddev->new_level = 1; + mddev->new_layout = 0; + mddev->new_chunk_sectors = 0; + conf = setup_conf(mddev); + if (!IS_ERR(conf)) + conf->barrier = 1; + return conf; + } + return ERR_PTR(-EINVAL); +} static struct mdk_personality raid1_personality = { @@ -2299,6 +2360,7 @@ static struct mdk_personality raid1_personality = .size = raid1_size, .check_reshape = raid1_reshape, .quiesce = raid1_quiesce, + .takeover = raid1_takeover, }; static int __init raid_init(void) @@ -2314,6 +2376,7 @@ static void raid_exit(void) module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAID1 (mirroring) 
personality for MD"); MODULE_ALIAS("md-personality-3"); /* RAID1 */ MODULE_ALIAS("md-raid1"); MODULE_ALIAS("md-level-1"); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index e87b84deff6..5f2d443ae28 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -59,6 +59,11 @@ struct r1_private_data_s { mempool_t *r1bio_pool; mempool_t *r1buf_pool; + + /* When taking over an array from a different personality, we store + * the new thread here until we fully activate the array. + */ + struct mdk_thread_s *thread; }; typedef struct r1_private_data_s conf_t; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c2cb7b87b44..d119b7b75e7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -804,7 +804,7 @@ static int make_request(struct request_queue *q, struct bio * bio) mdk_rdev_t *blocked_rdev; if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); + md_barrier_request(mddev, bio); return 0; } @@ -1432,6 +1432,43 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) /* + * Used by fix_read_error() to decay the per rdev read_errors. + * We halve the read error count for every hour that has elapsed + * since the last recorded read error. + * + */ +static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) +{ + struct timespec cur_time_mon; + unsigned long hours_since_last; + unsigned int read_errors = atomic_read(&rdev->read_errors); + + ktime_get_ts(&cur_time_mon); + + if (rdev->last_read_error.tv_sec == 0 && + rdev->last_read_error.tv_nsec == 0) { + /* first time we've seen a read error */ + rdev->last_read_error = cur_time_mon; + return; + } + + hours_since_last = (cur_time_mon.tv_sec - + rdev->last_read_error.tv_sec) / 3600; + + rdev->last_read_error = cur_time_mon; + + /* + * if hours_since_last is > the number of bits in read_errors + * just set read errors to 0. We do this to avoid + * overflowing the shift of read_errors by hours_since_last. + */ + if (hours_since_last >= 8 * sizeof(read_errors)) + atomic_set(&rdev->read_errors, 0); + else + atomic_set(&rdev->read_errors, read_errors >> hours_since_last); +} + +/* * This is a kernel thread which: * * 1. Retries failed read operations on working mirrors. 
@@ -1444,6 +1481,43 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) int sect = 0; /* Offset from r10_bio->sector */ int sectors = r10_bio->sectors; mdk_rdev_t*rdev; + int max_read_errors = atomic_read(&mddev->max_corr_read_errors); + + rcu_read_lock(); + { + int d = r10_bio->devs[r10_bio->read_slot].devnum; + char b[BDEVNAME_SIZE]; + int cur_read_error_count = 0; + + rdev = rcu_dereference(conf->mirrors[d].rdev); + bdevname(rdev->bdev, b); + + if (test_bit(Faulty, &rdev->flags)) { + rcu_read_unlock(); + /* drive has already been failed, just ignore any + more fix_read_error() attempts */ + return; + } + + check_decay_read_errors(mddev, rdev); + atomic_inc(&rdev->read_errors); + cur_read_error_count = atomic_read(&rdev->read_errors); + if (cur_read_error_count > max_read_errors) { + rcu_read_unlock(); + printk(KERN_NOTICE + "raid10: %s: Raid device exceeded " + "read_error threshold " + "[cur %d:max %d]\n", + b, cur_read_error_count, max_read_errors); + printk(KERN_NOTICE + "raid10: %s: Failing raid " + "device\n", b); + md_error(mddev, conf->mirrors[d].rdev); + return; + } + } + rcu_read_unlock(); + while(sectors) { int s = sectors; int sl = r10_bio->read_slot; @@ -1488,6 +1562,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) /* write it back and re-read */ rcu_read_lock(); while (sl != r10_bio->read_slot) { + char b[BDEVNAME_SIZE]; int d; if (sl==0) sl = conf->copies; @@ -1503,9 +1578,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) r10_bio->devs[sl].addr + sect + rdev->data_offset, s<<9, conf->tmppage, WRITE) - == 0) + == 0) { /* Well, this device is dead */ + printk(KERN_NOTICE + "raid10:%s: read correction " + "write failed" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)(sect+ + rdev->data_offset), + bdevname(rdev->bdev, b)); + printk(KERN_NOTICE "raid10:%s: failing " + "drive\n", + bdevname(rdev->bdev, b)); md_error(mddev, rdev); + } rdev_dec_pending(rdev, mddev); rcu_read_lock(); } @@ -1526,10 +1613,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) if (sync_page_io(rdev->bdev, r10_bio->devs[sl].addr + sect + rdev->data_offset, - s<<9, conf->tmppage, READ) == 0) + s<<9, conf->tmppage, + READ) == 0) { /* Well, this device is dead */ + printk(KERN_NOTICE + "raid10:%s: unable to read back " + "corrected sectors" + " (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)(sect+ + rdev->data_offset), + bdevname(rdev->bdev, b)); + printk(KERN_NOTICE "raid10:%s: failing drive\n", + bdevname(rdev->bdev, b)); + md_error(mddev, rdev); - else + } else { printk(KERN_INFO "raid10:%s: read error corrected" " (%d sectors at %llu on %s)\n", @@ -1537,6 +1636,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) (unsigned long long)(sect+ rdev->data_offset), bdevname(rdev->bdev, b)); + } rdev_dec_pending(rdev, mddev); rcu_read_lock(); @@ -2275,13 +2375,6 @@ static void raid10_quiesce(mddev_t *mddev, int state) lower_barrier(conf); break; } - if (mddev->thread) { - if (mddev->bitmap) - mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; - else - mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - md_wakeup_thread(mddev->thread); - } } static struct mdk_personality raid10_personality = @@ -2315,6 +2408,7 @@ static void raid_exit(void) module_init(raid_init); module_exit(raid_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD"); 
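
Editorial sketch. check_decay_read_errors() above halves rdev->read_errors once for every full hour since the last recorded read error, clearing it outright if the shift would exceed the counter width, before the new error is counted against max_corr_read_errors (default 20). A small stand-alone model of just that arithmetic, with a hypothetical helper name:

#include <stdio.h>

/* Halve the stored count once per elapsed hour, or clear it if the
 * shift would overflow the counter. */
static unsigned int decay_read_errors(unsigned int read_errors,
                                      unsigned long hours_since_last)
{
        if (hours_since_last >= 8 * sizeof(read_errors))
                return 0;
        return read_errors >> hours_since_last;
}

int main(void)
{
        printf("%u\n", decay_read_errors(16, 0));       /* 16 */
        printf("%u\n", decay_read_errors(16, 2));       /* 4  */
        printf("%u\n", decay_read_errors(16, 100));     /* 0  */
        return 0;
}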
MODULE_ALIAS("md-personality-9"); /* RAID10 */ MODULE_ALIAS("md-raid10"); MODULE_ALIAS("md-level-10"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d29215d966d..e84204eb12d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2947,6 +2947,7 @@ static void handle_stripe5(struct stripe_head *sh) struct r5dev *dev; mdk_rdev_t *blocked_rdev = NULL; int prexor; + int dec_preread_active = 0; memset(&s, 0, sizeof(s)); pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " @@ -3096,12 +3097,8 @@ static void handle_stripe5(struct stripe_head *sh) set_bit(STRIPE_INSYNC, &sh->state); } } - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - atomic_dec(&conf->preread_active_stripes); - if (atomic_read(&conf->preread_active_stripes) < - IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - } + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3208,6 +3205,16 @@ static void handle_stripe5(struct stripe_head *sh) ops_run_io(sh, &s); + if (dec_preread_active) { + /* We delay this until after ops_run_io so that if make_request + * is waiting on a barrier, it won't continue until the writes + * have actually been submitted. + */ + atomic_dec(&conf->preread_active_stripes); + if (atomic_read(&conf->preread_active_stripes) < + IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } return_io(return_bi); } @@ -3221,6 +3228,7 @@ static void handle_stripe6(struct stripe_head *sh) struct r6_state r6s; struct r5dev *dev, *pdev, *qdev; mdk_rdev_t *blocked_rdev = NULL; + int dec_preread_active = 0; pr_debug("handling stripe %llu, state=%#lx cnt=%d, " "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", @@ -3358,7 +3366,6 @@ static void handle_stripe6(struct stripe_head *sh) * completed */ if (sh->reconstruct_state == reconstruct_state_drain_result) { - int qd_idx = sh->qd_idx; sh->reconstruct_state = reconstruct_state_idle; /* All the 'written' buffers and the parity blocks are ready to @@ -3380,12 +3387,8 @@ static void handle_stripe6(struct stripe_head *sh) set_bit(STRIPE_INSYNC, &sh->state); } } - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - atomic_dec(&conf->preread_active_stripes); - if (atomic_read(&conf->preread_active_stripes) < - IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - } + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + dec_preread_active = 1; } /* Now to consider new write requests and what else, if anything @@ -3494,6 +3497,18 @@ static void handle_stripe6(struct stripe_head *sh) ops_run_io(sh, &s); + + if (dec_preread_active) { + /* We delay this until after ops_run_io so that if make_request + * is waiting on a barrier, it won't continue until the writes + * have actually been submitted. + */ + atomic_dec(&conf->preread_active_stripes); + if (atomic_read(&conf->preread_active_stripes) < + IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } + return_io(return_bi); } @@ -3741,7 +3756,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) { mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev->private; - unsigned int dd_idx; + int dd_idx; struct bio* align_bi; mdk_rdev_t *rdev; @@ -3866,7 +3881,13 @@ static int make_request(struct request_queue *q, struct bio * bi) int cpu, remaining; if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { - bio_endio(bi, -EOPNOTSUPP); + /* Drain all pending writes. 
We only really need + * to ensure they have been submitted, but this is + * easier. + */ + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + md_barrier_request(mddev, bi); return 0; } @@ -3990,6 +4011,9 @@ static int make_request(struct request_queue *q, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); + if (mddev->barrier && + !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + atomic_inc(&conf->preread_active_stripes); release_stripe(sh); } else { /* cannot get stripe for read-ahead, just give-up */ @@ -4009,6 +4033,14 @@ static int make_request(struct request_queue *q, struct bio * bi) bio_endio(bi, 0); } + + if (mddev->barrier) { + /* We need to wait for the stripes to all be handled. + * So: wait for preread_active_stripes to drop to 0. + */ + wait_event(mddev->thread->wqueue, + atomic_read(&conf->preread_active_stripes) == 0); + } return 0; } @@ -5860,6 +5892,7 @@ static void raid5_exit(void) module_init(raid5_init); module_exit(raid5_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); MODULE_ALIAS("md-personality-4"); /* RAID5 */ MODULE_ALIAS("md-raid5"); MODULE_ALIAS("md-raid4"); diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c index 866215ac7f2..bffc61bff5a 100644 --- a/drivers/md/raid6algos.c +++ b/drivers/md/raid6algos.c @@ -31,25 +31,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page); struct raid6_calls raid6_call; EXPORT_SYMBOL_GPL(raid6_call); -/* Various routine sets */ -extern const struct raid6_calls raid6_intx1; -extern const struct raid6_calls raid6_intx2; -extern const struct raid6_calls raid6_intx4; -extern const struct raid6_calls raid6_intx8; -extern const struct raid6_calls raid6_intx16; -extern const struct raid6_calls raid6_intx32; -extern const struct raid6_calls raid6_mmxx1; -extern const struct raid6_calls raid6_mmxx2; -extern const struct raid6_calls raid6_sse1x1; -extern const struct raid6_calls raid6_sse1x2; -extern const struct raid6_calls raid6_sse2x1; -extern const struct raid6_calls raid6_sse2x2; -extern const struct raid6_calls raid6_sse2x4; -extern const struct raid6_calls raid6_altivec1; -extern const struct raid6_calls raid6_altivec2; -extern const struct raid6_calls raid6_altivec4; -extern const struct raid6_calls raid6_altivec8; - const struct raid6_calls * const raid6_algos[] = { &raid6_intx1, &raid6_intx2, @@ -169,3 +150,4 @@ static void raid6_exit(void) subsys_initcall(raid6_select_algo); module_exit(raid6_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAID6 Q-syndrome calculations"); |
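
Editorial sketch. Several of the new constructors in this series report failure through their pointer return rather than NULL: dm_swap_table(), setup_conf() and raid1_takeover() all return ERR_PTR(-errno), and their callers test the result with IS_ERR()/PTR_ERR(). The real helpers live in linux/err.h; the following is only a rough user-space approximation of the idiom, and make_conf() is a purely hypothetical constructor in the style of setup_conf():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Approximation of ERR_PTR()/IS_ERR()/PTR_ERR(): a small negative errno
 * is encoded at the top of the address space, so a single pointer return
 * carries either an object or an error code. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)       { return (void *)error; }
static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static long ptr_err(const void *ptr)   { return (long)ptr; }

struct conf { int raid_disks; };        /* stand-in for conf_t */

static struct conf *make_conf(int raid_disks)
{
        struct conf *conf;

        if (raid_disks != 2)
                return err_ptr(-EINVAL);        /* unsupported geometry */

        conf = malloc(sizeof(*conf));
        if (!conf)
                return err_ptr(-ENOMEM);
        conf->raid_disks = raid_disks;
        return conf;
}

int main(void)
{
        struct conf *conf = make_conf(5);

        if (is_err(conf))
                printf("make_conf failed: %ld\n", ptr_err(conf));

        conf = make_conf(2);
        if (!is_err(conf)) {
                printf("conf with %d disks\n", conf->raid_disks);
                free(conf);
        }
        return 0;
}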