Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 673
1 file changed, 382 insertions(+), 291 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 8c2a000cf3f..4cba2d808af 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,7 +1,8 @@ /* - * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. + * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> * * This file is released under the GPL. */ @@ -18,7 +19,6 @@ #include <linux/crypto.h> #include <linux/workqueue.h> #include <linux/backing-dev.h> -#include <linux/percpu.h> #include <linux/atomic.h> #include <linux/scatterlist.h> #include <asm/page.h> @@ -38,25 +38,24 @@ struct convert_context { struct completion restart; struct bio *bio_in; struct bio *bio_out; - unsigned int offset_in; - unsigned int offset_out; - unsigned int idx_in; - unsigned int idx_out; - sector_t sector; - atomic_t pending; + struct bvec_iter iter_in; + struct bvec_iter iter_out; + sector_t cc_sector; + atomic_t cc_pending; + struct ablkcipher_request *req; }; /* * per bio private data */ struct dm_crypt_io { - struct dm_target *target; + struct crypt_config *cc; struct bio *base_bio; struct work_struct work; struct convert_context ctx; - atomic_t pending; + atomic_t io_pending; int error; sector_t sector; struct dm_crypt_io *base_io; @@ -98,6 +97,13 @@ struct iv_lmk_private { u8 *seed; }; +#define TCW_WHITENING_SIZE 16 +struct iv_tcw_private { + struct crypto_shash *crc32_tfm; + u8 *iv_seed; + u8 *whitening; +}; + /* * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. @@ -105,18 +111,7 @@ struct iv_lmk_private { enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { - struct ablkcipher_request *req; - /* ESSIV: struct crypto_cipher *essiv_tfm */ - void *iv_private; - struct crypto_ablkcipher *tfms[0]; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization. */ struct crypt_config { struct dm_dev *dev; @@ -142,15 +137,14 @@ struct crypt_config { struct iv_essiv_private essiv; struct iv_benbi_private benbi; struct iv_lmk_private lmk; + struct iv_tcw_private tcw; } iv_gen_private; sector_t iv_offset; unsigned int iv_size; - /* - * Duplicated per cpu state. Access through - * per_cpu_ptr() only. - */ - struct crypt_cpu __percpu *cpu; + /* ESSIV: struct crypto_cipher *essiv_tfm */ + void *iv_private; + struct crypto_ablkcipher **tfms; unsigned tfms_count; /* @@ -170,13 +164,13 @@ struct crypt_config { unsigned long flags; unsigned int key_size; - unsigned int key_parts; + unsigned int key_parts; /* independent parts in key buffer */ + unsigned int key_extra_size; /* additional keys length */ u8 key[0]; }; #define MIN_IOS 16 #define MIN_POOL_PAGES 32 -#define MIN_BIO_PAGES 8 static struct kmem_cache *_crypt_io_pool; @@ -184,17 +178,12 @@ static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ - return this_cpu_ptr(cc->cpu); -} - /* * Use this to access cipher attributes that are the same for each CPU. 
*/ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) { - return __this_cpu_ptr(cc->cpu)->tfms[0]; + return cc->tfms[0]; } /* @@ -230,6 +219,16 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) * version 3: the same as version 2 with additional IV seed * (it uses 65 keys, last key is used as IV seed) * + * tcw: Compatible implementation of the block chaining mode used + * by the TrueCrypt device encryption system (prior to version 4.1). + * For more info see: http://www.truecrypt.org + * It operates on full 512 byte sectors and uses CBC + * with an IV derived from initial key and the sector number. + * In addition, whitening value is applied on every sector, whitening + * is calculated from initial key, sector number and mixed using CRC32. + * Note that this encryption scheme is vulnerable to watermarking attacks + * and should be used for old compatible containers access only. + * * plumb: unimplemented, see: * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 */ @@ -259,7 +258,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) struct hash_desc desc; struct scatterlist sg; struct crypto_cipher *essiv_tfm; - int err, cpu; + int err; sg_init_one(&sg, cc->key, cc->key_size); desc.tfm = essiv->hash_tfm; @@ -269,14 +268,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) if (err) return err; - for_each_possible_cpu(cpu) { - essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private, + essiv_tfm = cc->iv_private; - err = crypto_cipher_setkey(essiv_tfm, essiv->salt, - crypto_hash_digestsize(essiv->hash_tfm)); - if (err) - return err; - } + err = crypto_cipher_setkey(essiv_tfm, essiv->salt, + crypto_hash_digestsize(essiv->hash_tfm)); + if (err) + return err; return 0; } @@ -287,16 +284,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc) struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); struct crypto_cipher *essiv_tfm; - int cpu, r, err = 0; + int r, err = 0; memset(essiv->salt, 0, salt_size); - for_each_possible_cpu(cpu) { - essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private; - r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); - if (r) - err = r; - } + essiv_tfm = cc->iv_private; + r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); + if (r) + err = r; return err; } @@ -336,8 +331,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, static void crypt_iv_essiv_dtr(struct crypt_config *cc) { - int cpu; - struct crypt_cpu *cpu_cc; struct crypto_cipher *essiv_tfm; struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; @@ -347,15 +340,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc) kzfree(essiv->salt); essiv->salt = NULL; - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - essiv_tfm = cpu_cc->iv_private; + essiv_tfm = cc->iv_private; - if (essiv_tfm) - crypto_free_cipher(essiv_tfm); + if (essiv_tfm) + crypto_free_cipher(essiv_tfm); - cpu_cc->iv_private = NULL; - } + cc->iv_private = NULL; } static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, @@ -364,7 +354,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, struct crypto_cipher *essiv_tfm = NULL; struct crypto_hash *hash_tfm = NULL; u8 *salt = NULL; - int err, cpu; + int err; if (!opts) { ti->error = "Digest algorithm missing for ESSIV mode"; @@ -389,15 +379,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, cc->iv_gen_private.essiv.salt = 
salt; cc->iv_gen_private.essiv.hash_tfm = hash_tfm; - for_each_possible_cpu(cpu) { - essiv_tfm = setup_essiv_cpu(cc, ti, salt, - crypto_hash_digestsize(hash_tfm)); - if (IS_ERR(essiv_tfm)) { - crypt_iv_essiv_dtr(cc); - return PTR_ERR(essiv_tfm); - } - per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm; + essiv_tfm = setup_essiv_cpu(cc, ti, salt, + crypto_hash_digestsize(hash_tfm)); + if (IS_ERR(essiv_tfm)) { + crypt_iv_essiv_dtr(cc); + return PTR_ERR(essiv_tfm); } + cc->iv_private = essiv_tfm; return 0; @@ -411,7 +399,7 @@ bad: static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { - struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; + struct crypto_cipher *essiv_tfm = cc->iv_private; memset(iv, 0, cc->iv_size); *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); @@ -541,7 +529,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, char ctx[crypto_shash_descsize(lmk->hash_tfm)]; } sdesc; struct md5_state md5state; - u32 buf[4]; + __le32 buf[4]; int i, r; sdesc.desc.tfm = lmk->hash_tfm; @@ -590,9 +578,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, int r = 0; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { - src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0); + src = kmap_atomic(sg_page(&dmreq->sg_in)); r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); - kunmap_atomic(src, KM_USER0); + kunmap_atomic(src); } else memset(iv, 0, cc->iv_size); @@ -608,14 +596,161 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) return 0; - dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0); + dst = kmap_atomic(sg_page(&dmreq->sg_out)); r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); /* Tweak the first block of plaintext sector */ if (!r) crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); - kunmap_atomic(dst, KM_USER0); + kunmap_atomic(dst); + return r; +} + +static void crypt_iv_tcw_dtr(struct crypt_config *cc) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + + kzfree(tcw->iv_seed); + tcw->iv_seed = NULL; + kzfree(tcw->whitening); + tcw->whitening = NULL; + + if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) + crypto_free_shash(tcw->crc32_tfm); + tcw->crc32_tfm = NULL; +} + +static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + + if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { + ti->error = "Wrong key size for TCW"; + return -EINVAL; + } + + tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); + if (IS_ERR(tcw->crc32_tfm)) { + ti->error = "Error initializing CRC32 in TCW"; + return PTR_ERR(tcw->crc32_tfm); + } + + tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); + tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); + if (!tcw->iv_seed || !tcw->whitening) { + crypt_iv_tcw_dtr(cc); + ti->error = "Error allocating seed storage in TCW"; + return -ENOMEM; + } + + return 0; +} + +static int crypt_iv_tcw_init(struct crypt_config *cc) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE; + + memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); + memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], + TCW_WHITENING_SIZE); + + return 0; +} + +static int crypt_iv_tcw_wipe(struct crypt_config *cc) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + + memset(tcw->iv_seed, 0, cc->iv_size); + memset(tcw->whitening, 0, 
TCW_WHITENING_SIZE); + + return 0; +} + +static int crypt_iv_tcw_whitening(struct crypt_config *cc, + struct dm_crypt_request *dmreq, + u8 *data) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + u64 sector = cpu_to_le64((u64)dmreq->iv_sector); + u8 buf[TCW_WHITENING_SIZE]; + struct { + struct shash_desc desc; + char ctx[crypto_shash_descsize(tcw->crc32_tfm)]; + } sdesc; + int i, r; + + /* xor whitening with sector number */ + memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE); + crypto_xor(buf, (u8 *)&sector, 8); + crypto_xor(&buf[8], (u8 *)&sector, 8); + + /* calculate crc32 for every 32bit part and xor it */ + sdesc.desc.tfm = tcw->crc32_tfm; + sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + for (i = 0; i < 4; i++) { + r = crypto_shash_init(&sdesc.desc); + if (r) + goto out; + r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4); + if (r) + goto out; + r = crypto_shash_final(&sdesc.desc, &buf[i * 4]); + if (r) + goto out; + } + crypto_xor(&buf[0], &buf[12], 4); + crypto_xor(&buf[4], &buf[8], 4); + + /* apply whitening (8 bytes) to whole sector */ + for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) + crypto_xor(data + i * 8, buf, 8); +out: + memset(buf, 0, sizeof(buf)); + return r; +} + +static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + u64 sector = cpu_to_le64((u64)dmreq->iv_sector); + u8 *src; + int r = 0; + + /* Remove whitening from ciphertext */ + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { + src = kmap_atomic(sg_page(&dmreq->sg_in)); + r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset); + kunmap_atomic(src); + } + + /* Calculate IV */ + memcpy(iv, tcw->iv_seed, cc->iv_size); + crypto_xor(iv, (u8 *)&sector, 8); + if (cc->iv_size > 8) + crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8); + + return r; +} + +static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + u8 *dst; + int r; + + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) + return 0; + + /* Apply whitening on ciphertext */ + dst = kmap_atomic(sg_page(&dmreq->sg_out)); + r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset); + kunmap_atomic(dst); + return r; } @@ -654,6 +789,15 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = { .post = crypt_iv_lmk_post }; +static struct crypt_iv_operations crypt_iv_tcw_ops = { + .ctr = crypt_iv_tcw_ctr, + .dtr = crypt_iv_tcw_dtr, + .init = crypt_iv_tcw_init, + .wipe = crypt_iv_tcw_wipe, + .generator = crypt_iv_tcw_gen, + .post = crypt_iv_tcw_post +}; + static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, @@ -661,11 +805,11 @@ static void crypt_convert_init(struct crypt_config *cc, { ctx->bio_in = bio_in; ctx->bio_out = bio_out; - ctx->offset_in = 0; - ctx->offset_out = 0; - ctx->idx_in = bio_in ? bio_in->bi_idx : 0; - ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; - ctx->sector = sector + cc->iv_offset; + if (bio_in) + ctx->iter_in = bio_in->bi_iter; + if (bio_out) + ctx->iter_out = bio_out->bi_iter; + ctx->cc_sector = sector + cc->iv_offset; init_completion(&ctx->restart); } @@ -692,36 +836,27 @@ static int crypt_convert_block(struct crypt_config *cc, struct convert_context *ctx, struct ablkcipher_request *req) { - struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); - struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); struct dm_crypt_request *dmreq; u8 *iv; - int r = 0; + int r; dmreq = dmreq_of_req(cc, req); iv = iv_of_dmreq(cc, dmreq); - dmreq->iv_sector = ctx->sector; + dmreq->iv_sector = ctx->cc_sector; dmreq->ctx = ctx; sg_init_table(&dmreq->sg_in, 1); - sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, - bv_in->bv_offset + ctx->offset_in); + sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, + bv_in.bv_offset); sg_init_table(&dmreq->sg_out, 1); - sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, - bv_out->bv_offset + ctx->offset_out); + sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, + bv_out.bv_offset); - ctx->offset_in += 1 << SECTOR_SHIFT; - if (ctx->offset_in >= bv_in->bv_len) { - ctx->offset_in = 0; - ctx->idx_in++; - } - - ctx->offset_out += 1 << SECTOR_SHIFT; - if (ctx->offset_out >= bv_out->bv_len) { - ctx->offset_out = 0; - ctx->idx_out++; - } + bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); if (cc->iv_gen_ops) { r = cc->iv_gen_ops->generator(cc, iv, dmreq); @@ -749,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); - unsigned key_index = ctx->sector & (cc->tfms_count - 1); + unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!this_cc->req) - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->req) + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]); - ablkcipher_request_set_callback(this_cc->req, + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + ablkcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } /* @@ -767,41 +901,39 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); int r; - atomic_set(&ctx->pending, 1); + atomic_set(&ctx->cc_pending, 1); - while(ctx->idx_in < ctx->bio_in->bi_vcnt && - ctx->idx_out < ctx->bio_out->bi_vcnt) { + while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { crypt_alloc_req(cc, ctx); - atomic_inc(&ctx->pending); + atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, this_cc->req); + r = crypt_convert_block(cc, ctx, ctx->req); switch (r) { /* async */ case -EBUSY: wait_for_completion(&ctx->restart); - INIT_COMPLETION(ctx->restart); + reinit_completion(&ctx->restart); /* fall through*/ case -EINPROGRESS: - this_cc->req = NULL; - ctx->sector++; + ctx->req = NULL; + ctx->cc_sector++; continue; /* sync */ case 0: - 
atomic_dec(&ctx->pending); - ctx->sector++; + atomic_dec(&ctx->cc_pending); + ctx->cc_sector++; cond_resched(); continue; /* error */ default: - atomic_dec(&ctx->pending); + atomic_dec(&ctx->cc_pending); return r; } } @@ -809,14 +941,6 @@ static int crypt_convert(struct crypt_config *cc, return 0; } -static void dm_crypt_bio_destructor(struct bio *bio) -{ - struct dm_crypt_io *io = bio->bi_private; - struct crypt_config *cc = io->target->private; - - bio_free(bio, cc->bs); -} - /* * Generate a new unfragmented bio with the given size * This should never violate the device limitations @@ -826,7 +950,7 @@ static void dm_crypt_bio_destructor(struct bio *bio) static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, unsigned *out_of_pages) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; @@ -848,12 +972,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, } /* - * if additional pages cannot be allocated without waiting, - * return a partially allocated bio, the caller will then try - * to allocate additional bios while submitting this partial bio + * If additional pages cannot be allocated without waiting, + * return a partially-allocated bio. The caller will then try + * to allocate more bios while submitting this partial bio. */ - if (i == (MIN_BIO_PAGES - 1)) - gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; len = (size > PAGE_SIZE) ? PAGE_SIZE : size; @@ -865,7 +988,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, size -= len; } - if (!clone->bi_size) { + if (!clone->bi_iter.bi_size) { bio_put(clone); return NULL; } @@ -878,34 +1001,33 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) unsigned int i; struct bio_vec *bv; - for (i = 0; i < clone->bi_vcnt; i++) { - bv = bio_iovec_idx(clone, i); + bio_for_each_segment_all(bv, clone, i) { BUG_ON(!bv->bv_page); mempool_free(bv->bv_page, cc->page_pool); bv->bv_page = NULL; } } -static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, +static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, struct bio *bio, sector_t sector) { - struct crypt_config *cc = ti->private; struct dm_crypt_io *io; io = mempool_alloc(cc->io_pool, GFP_NOIO); - io->target = ti; + io->cc = cc; io->base_bio = bio; io->sector = sector; io->error = 0; io->base_io = NULL; - atomic_set(&io->pending, 0); + io->ctx.req = NULL; + atomic_set(&io->io_pending, 0); return io; } static void crypt_inc_pending(struct dm_crypt_io *io) { - atomic_inc(&io->pending); + atomic_inc(&io->io_pending); } /* @@ -915,14 +1037,16 @@ static void crypt_inc_pending(struct dm_crypt_io *io) */ static void crypt_dec_pending(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; struct dm_crypt_io *base_io = io->base_io; int error = io->error; - if (!atomic_dec_and_test(&io->pending)) + if (!atomic_dec_and_test(&io->io_pending)) return; + if (io->ctx.req) + mempool_free(io->ctx.req, cc->req_pool); mempool_free(io, cc->io_pool); if (likely(!base_io)) @@ -954,7 +1078,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) static void crypt_endio(struct bio *clone, int error) { struct dm_crypt_io *io = clone->bi_private; - struct crypt_config *cc = io->target->private; + 
struct crypt_config *cc = io->cc; unsigned rw = bio_data_dir(clone); if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) @@ -981,18 +1105,17 @@ static void crypt_endio(struct bio *clone, int error) static void clone_init(struct dm_crypt_io *io, struct bio *clone) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; clone->bi_rw = io->base_bio->bi_rw; - clone->bi_destructor = dm_crypt_bio_destructor; } static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; struct bio *clone; @@ -1001,19 +1124,14 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) * copy the required bvecs because we need the original * one in order to decrypt the whole bio data *afterwards*. */ - clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); + clone = bio_clone_bioset(base_bio, gfp, cc->bs); if (!clone) return 1; crypt_inc_pending(io); clone_init(io, clone); - clone->bi_idx = 0; - clone->bi_vcnt = bio_segments(base_bio); - clone->bi_size = base_bio->bi_size; - clone->bi_sector = cc->start + io->sector; - memcpy(clone->bi_io_vec, bio_iovec(base_bio), - sizeof(struct bio_vec) * clone->bi_vcnt); + clone->bi_iter.bi_sector = cc->start + io->sector; generic_make_request(clone); return 0; @@ -1040,30 +1158,28 @@ static void kcryptd_io(struct work_struct *work) static void kcryptd_queue_io(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; INIT_WORK(&io->work, kcryptd_io); queue_work(cc->io_queue, &io->work); } -static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, - int error, int async) +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) { struct bio *clone = io->ctx.bio_out; - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; - if (unlikely(error < 0)) { + if (unlikely(io->error < 0)) { crypt_free_buffer_pages(cc, clone); bio_put(clone); - io->error = -EIO; crypt_dec_pending(io); return; } /* crypt_convert should have filled the clone bio */ - BUG_ON(io->ctx.idx_out < clone->bi_vcnt); + BUG_ON(io->ctx.iter_out.bi_size); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; if (async) kcryptd_queue_io(io); @@ -1073,12 +1189,12 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *clone; struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; - unsigned remaining = io->base_bio->bi_size; + unsigned remaining = io->base_bio->bi_iter.bi_size; sector_t sector = io->sector; int r; @@ -1100,18 +1216,22 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) } io->ctx.bio_out = clone; - io->ctx.idx_out = 0; + io->ctx.iter_out = clone->bi_iter; - remaining -= clone->bi_size; + remaining -= clone->bi_iter.bi_size; sector += bio_sectors(clone); crypt_inc_pending(io); + r = crypt_convert(cc, &io->ctx); - crypt_finished = atomic_dec_and_test(&io->ctx.pending); + if (r < 0) + io->error = -EIO; + + crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); /* Encryption was already finished, submit io now */ if (crypt_finished) { - kcryptd_crypt_write_io_submit(io, r, 0); + 
kcryptd_crypt_write_io_submit(io, 0); /* * If there was an error, do not try next fragments. @@ -1135,13 +1255,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) * between fragments, so switch to a new dm_crypt_io structure. */ if (unlikely(!crypt_finished && remaining)) { - new_io = crypt_io_alloc(io->target, io->base_bio, + new_io = crypt_io_alloc(io->cc, io->base_bio, sector); crypt_inc_pending(new_io); crypt_convert_init(cc, &new_io->ctx, NULL, io->base_bio, sector); - new_io->ctx.idx_in = io->ctx.idx_in; - new_io->ctx.offset_in = io->ctx.offset_in; + new_io->ctx.iter_in = io->ctx.iter_in; /* * Fragments after the first use the base_io @@ -1162,17 +1281,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_dec_pending(io); } -static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) +static void kcryptd_crypt_read_done(struct dm_crypt_io *io) { - if (unlikely(error < 0)) - io->error = -EIO; - crypt_dec_pending(io); } static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; int r = 0; crypt_inc_pending(io); @@ -1181,9 +1297,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) io->sector); r = crypt_convert(cc, &io->ctx); + if (r < 0) + io->error = -EIO; - if (atomic_dec_and_test(&io->ctx.pending)) - kcryptd_crypt_read_done(io, r); + if (atomic_dec_and_test(&io->ctx.cc_pending)) + kcryptd_crypt_read_done(io); crypt_dec_pending(io); } @@ -1194,7 +1312,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, struct dm_crypt_request *dmreq = async_req->data; struct convert_context *ctx = dmreq->ctx; struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; if (error == -EINPROGRESS) { complete(&ctx->restart); @@ -1204,15 +1322,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); + if (error < 0) + io->error = -EIO; + mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); - if (!atomic_dec_and_test(&ctx->pending)) + if (!atomic_dec_and_test(&ctx->cc_pending)) return; if (bio_data_dir(io->base_bio) == READ) - kcryptd_crypt_read_done(io, error); + kcryptd_crypt_read_done(io); else - kcryptd_crypt_write_io_submit(io, error, 1); + kcryptd_crypt_write_io_submit(io, 1); } static void kcryptd_crypt(struct work_struct *work) @@ -1227,7 +1348,7 @@ static void kcryptd_crypt(struct work_struct *work) static void kcryptd_queue_crypt(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; INIT_WORK(&io->work, kcryptd_crypt); queue_work(cc->crypt_queue, &io->work); @@ -1239,7 +1360,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) static int crypt_decode_key(u8 *key, char *hex, unsigned int size) { char buffer[3]; - char *endp; unsigned int i; buffer[2] = '\0'; @@ -1248,9 +1368,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) buffer[0] = *hex++; buffer[1] = *hex++; - key[i] = (u8)simple_strtoul(buffer, &endp, 16); - - if (endp != &buffer[2]) + if (kstrtou8(buffer, 16, &key[i])) return -EINVAL; } @@ -1260,43 +1378,38 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) return 0; } -/* - * Encode key into its hex representation - */ -static void crypt_encode_key(char *hex, u8 *key, unsigned 
int size) +static void crypt_free_tfms(struct crypt_config *cc) { - unsigned int i; - - for (i = 0; i < size; i++) { - sprintf(hex, "%02x", *key); - hex += 2; - key++; - } -} - -static void crypt_free_tfms(struct crypt_config *cc, int cpu) -{ - struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); unsigned i; + if (!cc->tfms) + return; + for (i = 0; i < cc->tfms_count; i++) - if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) { - crypto_free_ablkcipher(cpu_cc->tfms[i]); - cpu_cc->tfms[i] = NULL; + if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { + crypto_free_ablkcipher(cc->tfms[i]); + cc->tfms[i] = NULL; } + + kfree(cc->tfms); + cc->tfms = NULL; } -static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode) +static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) { - struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); unsigned i; int err; + cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), + GFP_KERNEL); + if (!cc->tfms) + return -ENOMEM; + for (i = 0; i < cc->tfms_count; i++) { - cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); - if (IS_ERR(cpu_cc->tfms[i])) { - err = PTR_ERR(cpu_cc->tfms[i]); - crypt_free_tfms(cc, cpu); + cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); + if (IS_ERR(cc->tfms[i])) { + err = PTR_ERR(cc->tfms[i]); + crypt_free_tfms(cc); return err; } } @@ -1306,16 +1419,18 @@ static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode) static int crypt_setkey_allcpus(struct crypt_config *cc) { - unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count); - int cpu, err = 0, i, r; + unsigned subkey_size; + int err = 0, i, r; - for_each_possible_cpu(cpu) { - for (i = 0; i < cc->tfms_count; i++) { - r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i], - cc->key + (i * subkey_size), subkey_size); - if (r) - err = r; - } + /* Ignore extra keys (which are used for IV etc) */ + subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); + + for (i = 0; i < cc->tfms_count; i++) { + r = crypto_ablkcipher_setkey(cc->tfms[i], + cc->key + (i * subkey_size), + subkey_size); + if (r) + err = r; } return err; @@ -1359,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc) static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; - struct crypt_cpu *cpu_cc; - int cpu; ti->private = NULL; @@ -1372,13 +1485,7 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); - if (cc->cpu) - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - if (cpu_cc->req) - mempool_free(cpu_cc->req, cc->req_pool); - crypt_free_tfms(cc, cpu); - } + crypt_free_tfms(cc); if (cc->bs) bioset_free(cc->bs); @@ -1396,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - if (cc->cpu) - free_percpu(cc->cpu); - kzfree(cc->cipher); kzfree(cc->cipher_string); @@ -1412,7 +1516,8 @@ static int crypt_ctr_cipher(struct dm_target *ti, struct crypt_config *cc = ti->private; char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; char *cipher_api = NULL; - int cpu, ret = -EINVAL; + int ret = -EINVAL; + char dummy; /* Convert to crypto api definition? 
*/ if (strchr(cipher_in, '(')) { @@ -1434,12 +1539,13 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (!keycount) cc->tfms_count = 1; - else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 || + else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || !is_power_of_2(cc->tfms_count)) { ti->error = "Bad cipher key count specification"; return -EINVAL; } cc->key_parts = cc->tfms_count; + cc->key_extra_size = 0; cc->cipher = kstrdup(cipher, GFP_KERNEL); if (!cc->cipher) @@ -1452,14 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) + - cc->tfms_count * sizeof(*(cc->cpu->tfms)), - __alignof__(struct crypt_cpu)); - if (!cc->cpu) { - ti->error = "Cannot allocate per cpu state"; - goto bad_mem; - } - /* * For compatibility with the original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. @@ -1486,18 +1584,9 @@ static int crypt_ctr_cipher(struct dm_target *ti, } /* Allocate cipher */ - for_each_possible_cpu(cpu) { - ret = crypt_alloc_tfms(cc, cpu, cipher_api); - if (ret < 0) { - ti->error = "Error allocating crypto tfm"; - goto bad; - } - } - - /* Initialize and set key */ - ret = crypt_set_key(cc, key); + ret = crypt_alloc_tfms(cc, cipher_api); if (ret < 0) { - ti->error = "Error decoding and setting key"; + ti->error = "Error allocating crypto tfm"; goto bad; } @@ -1527,18 +1616,33 @@ static int crypt_ctr_cipher(struct dm_target *ti, cc->iv_gen_ops = &crypt_iv_null_ops; else if (strcmp(ivmode, "lmk") == 0) { cc->iv_gen_ops = &crypt_iv_lmk_ops; - /* Version 2 and 3 is recognised according + /* + * Version 2 and 3 is recognised according * to length of provided multi-key string. * If present (version 3), last key is used as IV seed. + * All keys (including IV seed) are always the same size. 
*/ - if (cc->key_size % cc->key_parts) + if (cc->key_size % cc->key_parts) { cc->key_parts++; + cc->key_extra_size = cc->key_size / cc->key_parts; + } + } else if (strcmp(ivmode, "tcw") == 0) { + cc->iv_gen_ops = &crypt_iv_tcw_ops; + cc->key_parts += 2; /* IV + whitening */ + cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; } else { ret = -EINVAL; ti->error = "Invalid IV mode"; goto bad; } + /* Initialize and set key */ + ret = crypt_set_key(cc, key); + if (ret < 0) { + ti->error = "Error decoding and setting key"; + goto bad; + } + /* Allocate IV */ if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); @@ -1579,6 +1683,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) int ret; struct dm_arg_set as; const char *opt_string; + char dummy; static struct dm_arg _args[] = { {0, 1, "Invalid number of feature args"}, @@ -1636,7 +1741,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -EINVAL; - if (sscanf(argv[2], "%llu", &tmpll) != 1) { + if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid iv_offset sector"; goto bad; } @@ -1647,7 +1752,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - if (sscanf(argv[4], "%llu", &tmpll) != 1) { + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid device sector"; goto bad; } @@ -1669,7 +1774,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (opt_params == 1 && opt_string && !strcasecmp(opt_string, "allow_discards")) - ti->num_discard_requests = 1; + ti->num_discard_bios = 1; else if (opt_params) { ret = -EINVAL; ti->error = "Invalid feature arguments"; @@ -1678,27 +1783,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io", - WQ_NON_REENTRANT| - WQ_MEM_RECLAIM, - 1); + cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } cc->crypt_queue = alloc_workqueue("kcryptd", - WQ_NON_REENTRANT| - WQ_CPU_INTENSIVE| - WQ_MEM_RECLAIM, - 1); + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad; } - ti->num_flush_requests = 1; - ti->discard_zeroes_data_unsupported = 1; + ti->num_flush_bios = 1; + ti->discard_zeroes_data_unsupported = true; return 0; @@ -1707,11 +1806,10 @@ bad: return ret; } -static int crypt_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int crypt_map(struct dm_target *ti, struct bio *bio) { struct dm_crypt_io *io; - struct crypt_config *cc; + struct crypt_config *cc = ti->private; /* * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. 
@@ -1719,14 +1817,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, * - for REQ_DISCARD caller must use flush if IO ordering matters */ if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { - cc = ti->private; bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = cc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) @@ -1737,11 +1835,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, return DM_MAPIO_SUBMITTED; } -static int crypt_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) +static void crypt_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct crypt_config *cc = ti->private; - unsigned int sz = 0; + unsigned i, sz = 0; switch (type) { case STATUSTYPE_INFO: @@ -1751,27 +1849,20 @@ static int crypt_status(struct dm_target *ti, status_type_t type, case STATUSTYPE_TABLE: DMEMIT("%s ", cc->cipher_string); - if (cc->key_size > 0) { - if ((maxlen - sz) < ((cc->key_size << 1) + 1)) - return -ENOMEM; - - crypt_encode_key(result + sz, cc->key, cc->key_size); - sz += cc->key_size << 1; - } else { - if (sz >= maxlen) - return -ENOMEM; - result[sz++] = '-'; - } + if (cc->key_size > 0) + for (i = 0; i < cc->key_size; i++) + DMEMIT("%02x", cc->key[i]); + else + DMEMIT("-"); DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); - if (ti->num_discard_requests) + if (ti->num_discard_bios) DMEMIT(" 1 allow_discards"); break; } - return 0; } static void crypt_postsuspend(struct dm_target *ti) @@ -1865,7 +1956,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 11, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, @@ -1905,6 +1996,6 @@ static void __exit dm_crypt_exit(void) module_init(dm_crypt_init); module_exit(dm_crypt_exit); -MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_AUTHOR("Jana Saout <jana@saout.de>"); MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); MODULE_LICENSE("GPL"); |
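
The most substantial addition in the diff above is the `tcw` IV mode: crypt_iv_tcw_whitening() mixes a 16-byte key-derived seed with the sector number, runs CRC32 over each 32-bit word, folds the 16 bytes down to 8, and XORs that value across the whole 512-byte sector. The userspace sketch below walks through that derivation. It is illustrative only: the CRC-32 routine (reflected polynomial 0xEDB88320, zero seed, no final XOR) is an assumption standing in for the kernel's "crc32" shash, and every name outside the diff is made up for the example, so the output is not guaranteed to be bit-compatible with dm-crypt or TrueCrypt.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512
#define TCW_WHITENING_SIZE 16

/* Plain reflected CRC-32 (poly 0xEDB88320, zero seed, no final XOR).
 * Assumption: stands in for the kernel "crc32" shash used by dm-crypt. */
static uint32_t crc32_word(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

static void xor_bytes(uint8_t *dst, const uint8_t *src, size_t n)
{
	while (n--)
		*dst++ ^= *src++;
}

/* Mirrors crypt_iv_tcw_whitening(): seed ^ sector, CRC32 each 32-bit word
 * in place, fold 16 bytes down to 8, then XOR the result over the sector.
 * 'sector_le' must already be in little-endian byte order (cpu_to_le64 in
 * the kernel); on a little-endian host the plain integer works as-is. */
static void tcw_whiten_sector(const uint8_t seed[TCW_WHITENING_SIZE],
			      uint64_t sector_le, uint8_t *data)
{
	uint8_t buf[TCW_WHITENING_SIZE];

	memcpy(buf, seed, TCW_WHITENING_SIZE);
	xor_bytes(buf, (const uint8_t *)&sector_le, 8);
	xor_bytes(buf + 8, (const uint8_t *)&sector_le, 8);

	for (int i = 0; i < 4; i++) {
		uint32_t crc = crc32_word(0, &buf[i * 4], 4);
		memcpy(&buf[i * 4], &crc, 4);
	}
	xor_bytes(&buf[0], &buf[12], 4);
	xor_bytes(&buf[4], &buf[8], 4);

	/* apply the folded 8-byte whitening value to the whole sector */
	for (int i = 0; i < SECTOR_SIZE / 8; i++)
		xor_bytes(data + i * 8, buf, 8);
}

int main(void)
{
	uint8_t seed[TCW_WHITENING_SIZE] = { 0 }; /* last 16 key bytes in dm-crypt */
	uint8_t sector_data[SECTOR_SIZE] = { 0 };

	tcw_whiten_sector(seed, 42, sector_data); /* whiten "sector 42" */
	printf("first whitened byte: 0x%02x\n", sector_data[0]);
	return 0;
}
```

As the comment block in the diff stresses, sector-derived whitening does not prevent watermarking attacks; the mode exists only to read legacy TrueCrypt (pre-4.1) containers.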
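
A smaller but related change is the new key_parts/key_extra_size bookkeeping: crypt_setkey_allcpus() now strips the trailing key_extra_size bytes (the lmk IV seed, or the tcw IV seed plus whitening block) before dividing the remainder evenly among the tfms_count cipher transforms. The toy program below only walks through that arithmetic for one assumed layout (a hypothetical two-key spec with 32-byte subkeys and a 16-byte IV); the sizes are illustrative, not taken from a real mapping table.

```c
#include <stdio.h>

/* Toy walk-through of the key-buffer split performed by
 * crypt_setkey_allcpus() after this patch. All sizes are assumed for
 * illustration; dm-crypt derives them from the cipher specification. */
int main(void)
{
	unsigned tfms_count     = 2;                  /* hypothetical ":2" keycount */
	unsigned iv_size        = 16;                 /* hypothetical 16-byte IV */
	unsigned key_extra_size = iv_size + 16;       /* tcw: IV seed + whitening */
	unsigned key_size       = 2 * 32 + key_extra_size; /* two 32-byte subkeys */
	unsigned subkey_size    = (key_size - key_extra_size) / tfms_count;

	for (unsigned i = 0; i < tfms_count; i++)
		printf("tfm %u is keyed with bytes [%u, %u) of the key buffer\n",
		       i, i * subkey_size, (i + 1) * subkey_size);
	printf("the final %u bytes are reserved for the IV generator\n",
	       key_extra_size);
	return 0;
}
```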
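
Several constructor hunks also replace `sscanf(arg, "%llu", &tmpll) != 1` with `sscanf(arg, "%llu%c", &tmpll, &dummy) != 1`. The trailing %c only converts when characters remain after the number, so input such as "123abc" produces a return value of 2 and is rejected, where the old pattern silently accepted it. A standalone C illustration of the idiom (not dm-crypt code):

```c
#include <stdio.h>

/* Returns 1 only if 's' is exactly one unsigned long long with nothing
 * after it: the trailing %c converts a second item whenever extra
 * characters follow the number, making the count 2 instead of 1. */
static int parse_ull_strict(const char *s, unsigned long long *out)
{
	char dummy;

	return sscanf(s, "%llu%c", out, &dummy) == 1;
}

int main(void)
{
	unsigned long long v;

	printf("\"1024\"  -> %d\n", parse_ull_strict("1024", &v));  /* 1: accepted */
	printf("\"1024x\" -> %d\n", parse_ull_strict("1024x", &v)); /* 0: rejected */
	printf("\"\"      -> %d\n", parse_ull_strict("", &v));      /* 0: rejected */
	return 0;
}
```

The same idiom guards the key-count, iv_offset, and device-offset arguments in the dm-crypt constructor above.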
