Diffstat (limited to 'drivers/md/persistent-data/dm-block-manager.c')
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c  130
1 file changed, 73 insertions, 57 deletions
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 0317ecdc6e5..087411c95ff 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -25,7 +25,7 @@
  * may be held at once. This is just an implementation detail.
  *
  * ii) Recursive locking attempts are detected and return EINVAL. A stack
- * trace is also emitted for the previous lock aquisition.
+ * trace is also emitted for the previous lock acquisition.
  *
  * iii) Priority is given to write locks.
  */
@@ -104,12 +104,12 @@ static int __check_holder(struct block_lock *lock)
 
 	for (i = 0; i < MAX_HOLDERS; i++) {
 		if (lock->holders[i] == current) {
-			DMERR("recursive lock detected in pool metadata");
+			DMERR("recursive lock detected in metadata");
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 			DMERR("previously held here:");
 			print_stack_trace(lock->traces + i, 4);
 
-			DMERR("subsequent aquisition attempted here:");
+			DMERR("subsequent acquisition attempted here:");
 			t.nr_entries = 0;
 			t.max_entries = MAX_STACK;
 			t.entries = entries;
@@ -325,11 +325,6 @@ static struct dm_buffer *to_buffer(struct dm_block *b)
 	return (struct dm_buffer *) b;
 }
 
-static struct dm_bufio_client *to_bufio(struct dm_block_manager *bm)
-{
-	return (struct dm_bufio_client *) bm;
-}
-
 dm_block_t dm_block_location(struct dm_block *b)
 {
 	return dm_bufio_get_block_number(to_buffer(b));
@@ -367,34 +362,60 @@ static void dm_block_manager_write_callback(struct dm_buffer *buf)
 /*----------------------------------------------------------------
  * Public interface
  *--------------------------------------------------------------*/
+struct dm_block_manager {
+	struct dm_bufio_client *bufio;
+	bool read_only:1;
+};
+
 struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
 						 unsigned block_size,
 						 unsigned cache_size,
 						 unsigned max_held_per_thread)
 {
-	return (struct dm_block_manager *)
-		dm_bufio_client_create(bdev, block_size, max_held_per_thread,
-				       sizeof(struct buffer_aux),
-				       dm_block_manager_alloc_callback,
-				       dm_block_manager_write_callback);
+	int r;
+	struct dm_block_manager *bm;
+
+	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
+	if (!bm) {
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
+					   sizeof(struct buffer_aux),
+					   dm_block_manager_alloc_callback,
+					   dm_block_manager_write_callback);
+	if (IS_ERR(bm->bufio)) {
+		r = PTR_ERR(bm->bufio);
+		kfree(bm);
+		goto bad;
+	}
+
+	bm->read_only = false;
+
+	return bm;
+
+bad:
+	return ERR_PTR(r);
 }
 EXPORT_SYMBOL_GPL(dm_block_manager_create);
 
 void dm_block_manager_destroy(struct dm_block_manager *bm)
 {
-	return dm_bufio_client_destroy(to_bufio(bm));
+	dm_bufio_client_destroy(bm->bufio);
+	kfree(bm);
 }
 EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
 
 unsigned dm_bm_block_size(struct dm_block_manager *bm)
 {
-	return dm_bufio_get_block_size(to_bufio(bm));
+	return dm_bufio_get_block_size(bm->bufio);
 }
 EXPORT_SYMBOL_GPL(dm_bm_block_size);
 
 dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
 {
-	return dm_bufio_get_device_size(to_bufio(bm));
+	return dm_bufio_get_device_size(bm->bufio);
 }
 
 static int dm_bm_validate_buffer(struct dm_block_manager *bm,
@@ -406,16 +427,18 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
 		int r;
 		if (!v)
 			return 0;
-		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(to_bufio(bm)));
-		if (unlikely(r))
+		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
+		if (unlikely(r)) {
+			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
+				    (unsigned long long) dm_bufio_get_block_number(buf));
 			return r;
+		}
 		aux->validator = v;
 	} else {
 		if (unlikely(aux->validator != v)) {
-			DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
-			      aux->validator->name, v ? v->name : "NULL",
-			      (unsigned long long)
-			      dm_bufio_get_block_number(buf));
+			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
+				    aux->validator->name, v ? v->name : "NULL",
+				    (unsigned long long) dm_bufio_get_block_number(buf));
 			return -EINVAL;
 		}
 	}
@@ -430,7 +453,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
 	void *p;
 	int r;
 
-	p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
 	if (unlikely(IS_ERR(p)))
 		return PTR_ERR(p);
 
@@ -463,7 +486,10 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
 	void *p;
 	int r;
 
-	p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+	if (bm->read_only)
+		return -EPERM;
+
+	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
 	if (unlikely(IS_ERR(p)))
 		return PTR_ERR(p);
 
@@ -496,7 +522,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
 	void *p;
 	int r;
 
-	p = dm_bufio_get(to_bufio(bm), b, (struct dm_buffer **) result);
+	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
 	if (unlikely(IS_ERR(p)))
 		return PTR_ERR(p);
 	if (unlikely(!p))
@@ -529,7 +555,10 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
 	struct buffer_aux *aux;
 	void *p;
 
-	p = dm_bufio_new(to_bufio(bm), b, (struct dm_buffer **) result);
+	if (bm->read_only)
+		return -EPERM;
+
+	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
 	if (unlikely(IS_ERR(p)))
 		return PTR_ERR(p);
 
@@ -547,6 +576,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
 
 int dm_bm_unlock(struct dm_block *b)
 {
@@ -565,45 +595,31 @@ int dm_bm_unlock(struct dm_block *b)
 }
 EXPORT_SYMBOL_GPL(dm_bm_unlock);
 
-int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
+int dm_bm_flush(struct dm_block_manager *bm)
 {
-	struct buffer_aux *aux;
-
-	aux = dm_bufio_get_aux_data(to_buffer(b));
+	if (bm->read_only)
+		return -EPERM;
 
-	if (aux->write_locked) {
-		dm_bufio_mark_buffer_dirty(to_buffer(b));
-		bl_up_write(&aux->lock);
-	} else
-		bl_up_read(&aux->lock);
-
-	dm_bufio_release_move(to_buffer(b), n);
-	return 0;
+	return dm_bufio_write_dirty_buffers(bm->bufio);
 }
+EXPORT_SYMBOL_GPL(dm_bm_flush);
 
-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
-			   struct dm_block *superblock)
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
 {
-	int r;
-
-	r = dm_bufio_write_dirty_buffers(to_bufio(bm));
-	if (unlikely(r))
-		return r;
-	r = dm_bufio_issue_flush(to_bufio(bm));
-	if (unlikely(r))
-		return r;
-
-	dm_bm_unlock(superblock);
+	dm_bufio_prefetch(bm->bufio, b, 1);
+}
 
-	r = dm_bufio_write_dirty_buffers(to_bufio(bm));
-	if (unlikely(r))
-		return r;
-	r = dm_bufio_issue_flush(to_bufio(bm));
-	if (unlikely(r))
-		return r;
+void dm_bm_set_read_only(struct dm_block_manager *bm)
+{
+	bm->read_only = true;
+}
+EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
 
-	return 0;
+void dm_bm_set_read_write(struct dm_block_manager *bm)
+{
+	bm->read_only = false;
 }
+EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
 
 u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
 {
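
Taken together, the hunks above hide the bufio client behind a private struct dm_block_manager, add a read-only mode, and replace dm_bm_unlock_move()/dm_bm_flush_and_unlock() with dm_bm_flush() and dm_bm_prefetch(). The following is a minimal usage sketch of the new calls, not part of the patch: the helper name is hypothetical, and the dm_bm_read_lock()/dm_bm_write_lock() argument order (manager, block number, optional validator, result) is assumed from dm-block-manager.h rather than shown in this diff.

/* Illustrative sketch only; not from the patch. */
#include "dm-block-manager.h"

/* Hypothetical helper: flush outstanding writes, then demote to read-only. */
static int demote_to_read_only(struct dm_block_manager *bm, dm_block_t b)
{
	struct dm_block *blk;
	int r;

	/* Flush dirty buffers while writes are still permitted. */
	r = dm_bm_flush(bm);
	if (r)
		return r;

	dm_bm_set_read_only(bm);

	/* Write locks (and further flushes) now fail with -EPERM. */
	r = dm_bm_write_lock(bm, b, NULL, &blk);
	if (r != -EPERM)
		return -EINVAL;

	/* Read locks are still allowed in read-only mode. */
	r = dm_bm_read_lock(bm, b, NULL, &blk);
	if (r)
		return r;

	return dm_bm_unlock(blk);
}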
