author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-10-15 11:31:54 +1100
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-10-15 11:31:54 +1100
commit     6dc6472581f693b5fc95aebedf67b4960fb85cf0 (patch)
tree       06a5a9a08519950575505273eabced331ed51405 /drivers/md/dm-crypt.c
parent     ee673eaa72d8d185012b1027a05e25aba18c267f (diff)
parent     8acd3a60bcca17c6d89c73cee3ad6057eb83ba1e (diff)
Merge commit 'origin'
Manual fixup of conflicts on:
arch/powerpc/include/asm/dcr-regs.h
drivers/net/ibm_newemac/core.h
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--   drivers/md/dm-crypt.c   109
1 files changed, 66 insertions, 43 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc8..682ef9e6acd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
 	ctx->sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
-	atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
 {
 	int r;
 
+	atomic_set(&ctx->pending, 1);
+
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+					  struct bio *bio, sector_t sector)
+{
+	struct crypt_config *cc = ti->private;
+	struct dm_crypt_io *io;
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	io->target = ti;
+	io->base_bio = bio;
+	io->sector = sector;
+	io->error = 0;
+	atomic_set(&io->pending, 0);
+
+	return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	/*
 	 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		io->error = -EIO;
+		crypt_dec_pending(io);
 		return;
 	}
 
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
 	if (async)
 		kcryptd_queue_io(io);
-	else {
-		atomic_inc(&io->pending);
+	else
 		generic_make_request(clone);
-	}
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
+	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
 	/*
+	 * Prevent io from disappearing until this function completes.
+	 */
+	crypt_inc_pending(io);
+
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
+	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
-			return;
+			break;
 		}
 
 		io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
 		remaining -= clone->bi_size;
 
+		crypt_inc_pending(io);
 		r = crypt_convert(cc, &io->ctx);
+		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
-		if (atomic_dec_and_test(&io->ctx.pending)) {
-			/* processed, no running async crypto */
+		/* Encryption was already finished, submit io now */
+		if (crypt_finished) {
 			kcryptd_crypt_write_io_submit(io, r, 0);
-			if (unlikely(r < 0))
-				return;
-		} else
-			atomic_inc(&io->pending);
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			/* wait for async crypto then reinitialize pending */
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
-			atomic_set(&io->ctx.pending, 1);
-			congestion_wait(WRITE, HZ/100);
+			/*
+			 * If there was an error, do not try next fragments.
+			 * For async, error is processed in async handler.
+			 */
+			if (unlikely(r < 0))
+				break;
 		}
-	}
-}
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	atomic_inc(&io->pending);
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
+			congestion_wait(WRITE, HZ/100);
 
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-	kcryptd_crypt_write_convert_loop(io);
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+	}
 
 	crypt_dec_pending(io);
 }
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
-	atomic_inc(&io->pending);
+	crypt_inc_pending(io);
 
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
-	io->base_bio = bio;
-	io->sector = bio->bi_sector - ti->begin;
-	io->error = 0;
-	atomic_set(&io->pending, 0);
+	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
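
The net effect of the dm-crypt changes above is a simpler reference-counting scheme: a dm_crypt_io starts with pending at 0, every piece of work in flight (the write-convert loop itself plus each encrypted fragment) takes one reference with crypt_inc_pending(), and crypt_dec_pending() completes the io only when the last reference is dropped. The following is a minimal user-space sketch of that pattern, not kernel code; the demo_* names are invented for this illustration and C11 atomics stand in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_io {
	atomic_int pending;	/* outstanding users of this io */
	int error;		/* sticky error code, 0 if none */
};

static struct demo_io *demo_io_alloc(void)
{
	struct demo_io *io = calloc(1, sizeof(*io));

	if (!io)
		abort();
	atomic_init(&io->pending, 0);	/* like crypt_io_alloc(): count starts at 0 */
	return io;
}

static void demo_inc_pending(struct demo_io *io)
{
	atomic_fetch_add(&io->pending, 1);
}

static void demo_dec_pending(struct demo_io *io)
{
	/* mirrors atomic_dec_and_test(): previous value 1 means we were last */
	if (atomic_fetch_sub(&io->pending, 1) == 1) {
		printf("io complete, error=%d\n", io->error);
		free(io);
	}
}

/* one encrypted fragment finishing, cf. kcryptd_crypt_write_io_submit() */
static void demo_fragment_done(struct demo_io *io, int error)
{
	if (error)
		io->error = error;
	demo_dec_pending(io);	/* drop the fragment's reference */
}

int main(void)
{
	struct demo_io *io = demo_io_alloc();

	/* hold a reference for the whole submission loop,
	 * cf. crypt_inc_pending(io) at the top of kcryptd_crypt_write_convert() */
	demo_inc_pending(io);

	for (int fragment = 0; fragment < 3; fragment++) {
		demo_inc_pending(io);		/* one reference per fragment */
		demo_fragment_done(io, 0);	/* fragment completes, drops it */
	}

	demo_dec_pending(io);	/* drop the loop-wide reference; completes the io */
	return 0;
}

The loop-wide reference is what keeps the io alive when a fragment's crypto completes asynchronously and drops its own reference before the loop has finished submitting the remaining fragments; only the final decrement at the end of the loop can take the count to zero.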