Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                                              36
-rw-r--r--  crypto/Makefile                                              4
-rw-r--r--  crypto/ahash.c                                               7
-rw-r--r--  crypto/algboss.c                                             4
-rw-r--r--  crypto/async_tx/Kconfig                                     14
-rw-r--r--  crypto/async_tx/async_memcpy.c                               2
-rw-r--r--  crypto/authenc.c                                             2
-rw-r--r--  crypto/blkcipher.c                                           2
-rw-r--r--  crypto/cryptd.c                                            206
-rw-r--r--  crypto/ctr.c                                                 2
-rw-r--r--  crypto/des_generic.c                                       130
-rw-r--r--  crypto/pcrypt.c                                            241
-rw-r--r--  crypto/testmgr.c                                            14
-rw-r--r--  crypto/twofish_generic.c (renamed from crypto/twofish.c)    1
-rw-r--r--  crypto/xts.c                                                 2
15 files changed, 520 insertions, 147 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 9d9434f08c9..e4bac29a32e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,13 +23,12 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on CRYPTO_ANSI_CPRNG
+ depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
help
This option enables the fips boot option, which is
required if you want the system to operate under a FIPS 200
certification. You should say no unless you know what
- this is. Note that CRYPTO_ANSI_CPRNG is required if this
- option is selected
+ this is.
config CRYPTO_ALGAPI
tristate
@@ -80,6 +79,11 @@ config CRYPTO_RNG2
config CRYPTO_PCOMP
tristate
+ select CRYPTO_PCOMP2
+ select CRYPTO_ALGAPI
+
+config CRYPTO_PCOMP2
+ tristate
select CRYPTO_ALGAPI2
config CRYPTO_MANAGER
@@ -94,7 +98,15 @@ config CRYPTO_MANAGER2
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP
+ select CRYPTO_PCOMP2
+
+config CRYPTO_MANAGER_DISABLE_TESTS
+ bool "Disable run-time self tests"
+ default y
+ depends on CRYPTO_MANAGER2
+ help
+ Disable run-time self tests that normally take place at
+ algorithm registration.
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -352,7 +364,7 @@ config CRYPTO_RMD128
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
@@ -369,7 +381,7 @@ config CRYPTO_RMD160
against RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
@@ -381,7 +393,7 @@ config CRYPTO_RMD256
(than RIPEMD-128).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
@@ -393,7 +405,7 @@ config CRYPTO_RMD320
(than RIPEMD-160).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
@@ -448,7 +460,7 @@ config CRYPTO_WP512
Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
See also:
- <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+ <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
config CRYPTO_GHASH_CLMUL_NI_INTEL
tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
@@ -566,8 +578,8 @@ config CRYPTO_ANUBIS
in the NESSIE competition.
See also:
- <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/>
- <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html>
+ <https://www.cosic.esat.kuleuven.be/nessie/reports/>
+ <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
@@ -646,7 +658,7 @@ config CRYPTO_KHAZAD
on 32-bit processors. Khazad uses a 128-bit key size.
See also:
- <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html>
+ <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
config CRYPTO_SALSA20
tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
diff --git a/crypto/Makefile b/crypto/Makefile
index d7e6441df7f..423b7de61f9 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
-obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
cryptomgr-objs := algboss.o testmgr.o
@@ -61,7 +61,7 @@ obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
-obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
diff --git a/crypto/ahash.c b/crypto/ahash.c
index b8c59b889c6..f669822a7a4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
- if (offset & alignmask)
- nbytes = alignmask + 1 - (offset & alignmask);
+ if (offset & alignmask) {
+ unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+ if (nbytes > unaligned)
+ nbytes = unaligned;
+ }
walk->entrylen -= nbytes;
return nbytes;
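
The ahash hunk above fixes the walk-step computation: when the mapped offset is misaligned, the old code unconditionally set nbytes to the distance to the next alignment boundary, even when fewer bytes than that remained in the entry, so walk->entrylen could underflow. The new code only ever shrinks the step. A self-contained userspace rendering of the clamping rule, with hypothetical values in main():

    #include <assert.h>
    #include <stdio.h>

    /* Sketch of the walk-step computation in hash_walk_next(), with the
     * page-mapping details stripped out. alignmask is (alignment - 1). */
    static unsigned int walk_step(unsigned int offset, unsigned int entrylen,
                                  unsigned int alignmask)
    {
        unsigned int nbytes = entrylen;  /* bytes we would like to process */

        if (offset & alignmask) {
            /* distance to the next aligned boundary */
            unsigned int unaligned = alignmask + 1 - (offset & alignmask);

            /* The fix: only shrink the step, never grow it past the bytes
             * actually left. The old code assigned nbytes = unaligned
             * unconditionally. */
            if (nbytes > unaligned)
                nbytes = unaligned;
        }
        return nbytes;
    }

    int main(void)
    {
        /* 4-byte alignment, 1 byte into a word, only 2 bytes left: the
         * old code would have returned 3 and overrun the entry. */
        assert(walk_step(1, 2, 3) == 2);
        /* plenty of data left: the step is cut at the boundary as before */
        assert(walk_step(1, 100, 3) == 3);
        printf("ok\n");
        return 0;
    }
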
diff --git a/crypto/algboss.c b/crypto/algboss.c
index c3c196b5823..791d194958f 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -212,6 +212,10 @@ static int cryptomgr_test(void *data)
u32 type = param->type;
int err = 0;
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ goto skiptest;
+#endif
+
if (type & CRYPTO_ALG_TESTED)
goto skiptest;
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e28e276ac61..5de2ed13b35 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -22,6 +22,20 @@ config ASYNC_RAID6_RECOV
tristate
select ASYNC_CORE
select ASYNC_PQ
+ select ASYNC_XOR
+
+config ASYNC_RAID6_TEST
+ tristate "Self test for hardware accelerated raid6 recovery"
+ depends on ASYNC_RAID6_RECOV
+ select ASYNC_MEMCPY
+ ---help---
+ This is a one-shot self test that permutes through the
+ recovery of all the possible two disk failure scenarios for an
+ N-disk array. Recovery is performed with the asynchronous
+ raid6 recovery routines, and will optionally use an offload
+ engine if one is available.
+
+ If unsure, say N.
config ASYNC_TX_DISABLE_PQ_VAL_DMA
bool
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0ec1fb69d4e..518c22bd956 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
memcpy(dest_buf, src_buf, len);
- kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
+ kunmap_atomic(dest_buf, KM_USER0);
async_tx_sync_epilog(submit);
}
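
The swap above restores stack ordering for atomic kmaps: with the old fixed-slot kmap_atomic() interface (KM_USER0/KM_USER1), mappings nest like a stack, so the last page mapped must be the first unmapped. The blkcipher.c hunk below applies the same reordering to blkcipher_done_fast(). A short sketch of the discipline, using that era's two-argument API (the function itself is illustrative, not from this patch):

    /* Sketch of the LIFO discipline for the old fixed-slot atomic kmap
     * API (pre-2012 kernels): maps nest, so unmaps must be in reverse
     * order of the maps. */
    static void copy_page_to_page(struct page *dst, struct page *src,
                                  size_t len)
    {
        char *d = kmap_atomic(dst, KM_USER0);   /* mapped first  */
        char *s = kmap_atomic(src, KM_USER1);   /* mapped second */

        memcpy(d, s, len);

        kunmap_atomic(s, KM_USER1);   /* unmapped first (last mapped)  */
        kunmap_atomic(d, KM_USER0);   /* unmapped last (first mapped)  */
    }
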
diff --git a/crypto/authenc.c b/crypto/authenc.c
index b9884ee0adb..a5a22cfcd07 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -616,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
- return ERR_PTR(PTR_ERR(auth));
+ return ERR_CAST(auth);
auth_base = &auth->base;
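
authenc.c here, and ctr.c and xts.c further down, replace ERR_PTR(PTR_ERR(x)) with ERR_CAST(x). The two forms yield the same pointer value; ERR_CAST states the intent (propagate an error encoded in one pointer type through another) and skips the detour through long. A self-contained userspace re-creation of the err.h helpers showing the pattern (the struct names are illustrative):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Userspace re-creation of the kernel's err.h helpers, for illustration. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    /* ERR_CAST: reinterpret an error-carrying pointer as another pointer
     * type without bouncing the value through long. */
    static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

    struct hash_alg { int dummy; };
    struct instance { int dummy; };

    static struct instance *alloc_instance(struct hash_alg *auth)
    {
        if (IS_ERR(auth))
            return ERR_CAST(auth);  /* was: ERR_PTR(PTR_ERR(auth)) */
        return NULL;                /* real allocation elided */
    }

    int main(void)
    {
        struct instance *inst = alloc_instance(ERR_PTR(-12 /* ENOMEM */));
        printf("err = %ld\n", IS_ERR(inst) ? PTR_ERR(inst) : 0L);
        return 0;
    }
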
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 90d26c91f4e..7a7219266e3 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
memcpy(walk->dst.virt.addr, walk->page, n);
blkcipher_unmap_dst(walk);
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
- blkcipher_unmap_src(walk);
if (walk->flags & BLKCIPHER_WALK_DIFF)
blkcipher_unmap_dst(walk);
+ blkcipher_unmap_src(walk);
}
scatterwalk_advance(&walk->in, n);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ef71318976c..e46d21ae26b 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -3,6 +3,13 @@
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
+ * Added AEAD support to cryptd.
+ * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -12,6 +19,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
@@ -44,6 +52,11 @@ struct hashd_instance_ctx {
struct cryptd_queue *queue;
};
+struct aead_instance_ctx {
+ struct crypto_aead_spawn aead_spawn;
+ struct cryptd_queue *queue;
+};
+
struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child;
};
@@ -61,6 +74,14 @@ struct cryptd_hash_request_ctx {
struct shash_desc desc;
};
+struct cryptd_aead_ctx {
+ struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+ crypto_completion_t complete;
+};
+
static void cryptd_queue_worker(struct work_struct *work);
static int cryptd_init_queue(struct cryptd_queue *queue,
@@ -601,6 +622,144 @@ out_put_alg:
return err;
}
+static void cryptd_aead_crypt(struct aead_request *req,
+ struct crypto_aead *child,
+ int err,
+ int (*crypt)(struct aead_request *req))
+{
+ struct cryptd_aead_request_ctx *rctx;
+ rctx = aead_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+ aead_request_set_tfm(req, child);
+ err = crypt(req);
+ req->base.complete = rctx->complete;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+ struct crypto_aead *child = ctx->child;
+ struct aead_request *req;
+
+ req = container_of(areq, struct aead_request, base);
+ cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+ struct crypto_aead *child = ctx->child;
+ struct aead_request *req;
+
+ req = container_of(areq, struct aead_request, base);
+ cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+ crypto_completion_t complete)
+{
+ struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+ rctx->complete = req->base.complete;
+ req->base.complete = complete;
+ return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+ return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+ return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *cipher;
+
+ cipher = crypto_spawn_aead(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+ ctx->child = cipher;
+ tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+ return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct aead_instance_ctx *ctx;
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ inst->alg.cra_type = alg->cra_type;
+ inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+ inst->alg.cra_init = cryptd_aead_init_tfm;
+ inst->alg.cra_exit = cryptd_aead_exit_tfm;
+ inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
+ inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+ inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+ inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+ inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+ inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
+ inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
+ inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
+ inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
+
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+ kfree(inst);
+ }
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
static struct cryptd_queue queue;
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -616,6 +775,8 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_create_hash(tmpl, tb, &queue);
+ case CRYPTO_ALG_TYPE_AEAD:
+ return cryptd_create_aead(tmpl, tb, &queue);
}
return -EINVAL;
@@ -625,16 +786,21 @@ static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+ struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AHASH:
crypto_drop_shash(&hctx->spawn);
kfree(ahash_instance(inst));
return;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+ kfree(inst);
+ return;
+ default:
+ crypto_drop_spawn(&ctx->spawn);
+ kfree(inst);
}
-
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
}
static struct crypto_template cryptd_tmpl = {
@@ -724,6 +890,40 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_aead *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_aead(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+ return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+ struct cryptd_aead_ctx *ctx;
+ ctx = crypto_aead_ctx(&tfm->base);
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
+
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
+
static int __init cryptd_init(void)
{
int err;
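
The three exports at the end of the cryptd.c changes give drivers an explicit handle on the asynchronous wrapper: cryptd_alloc_aead() instantiates "cryptd(<alg>)" and checks that the resulting tfm really belongs to this module, cryptd_aead_child() returns the wrapped inner tfm, and cryptd_free_aead() releases it. A hedged sketch of a caller, where the algorithm name and the surrounding driver context are illustrative rather than taken from this patch:

    /* Sketch of a consumer of the new AEAD cryptd API. "rfc4106(gcm(aes))"
     * is only an example algorithm name. */
    static int example_setup(void)
    {
        struct cryptd_aead *cryptd_tfm;
        struct crypto_aead *child;

        cryptd_tfm = cryptd_alloc_aead("rfc4106(gcm(aes))", 0, 0);
        if (IS_ERR(cryptd_tfm))
            return PTR_ERR(cryptd_tfm);

        /* Requests queued through cryptd_tfm run in cryptd's workqueue;
         * the child tfm can be driven directly from contexts where that
         * is safe. */
        child = cryptd_aead_child(cryptd_tfm);
        (void)child;

        cryptd_free_aead(cryptd_tfm);
        return 0;
    }
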
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 6c3bfabb9d1..4ca7222cfeb 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
/* Block size must be >= 4 bytes. */
err = -EINVAL;
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 249f903cc45..873818d48e8 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -614,7 +614,7 @@ static const u32 S8[64] = {
#define T3(x) pt[2 * (x) + 2]
#define T4(x) pt[2 * (x) + 3]
-#define PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
+#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
/*
* Encryption key expansion
@@ -639,22 +639,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k)
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
- pe[15 * 2 + 0] = PC2(a, b, c, d); d = rs[d];
- pe[14 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[13 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[12 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[11 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[10 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 9 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 8 * 2 + 0] = PC2(d, a, b, c); c = rs[c];
- pe[ 7 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 6 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 5 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 4 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 3 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 2 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 1 * 2 + 0] = PC2(c, d, a, b); b = rs[b];
- pe[ 0 * 2 + 0] = PC2(b, c, d, a);
+ pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
/* Check if first half is weak */
w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
@@ -670,22 +670,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k)
/* Check if second half is weak */
w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
- pe[15 * 2 + 1] = PC2(a, b, c, d); d = rs[d];
- pe[14 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[13 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[12 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[11 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[10 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 9 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 8 * 2 + 1] = PC2(d, a, b, c); c = rs[c];
- pe[ 7 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 6 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 5 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 4 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 3 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 2 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 1 * 2 + 1] = PC2(c, d, a, b); b = rs[b];
- pe[ 0 * 2 + 1] = PC2(b, c, d, a);
+ pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
for (d = 0; d < 16; ++d) {
@@ -722,22 +722,22 @@ static void dkey(u32 *pe, const u8 *k)
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
- pe[ 0 * 2] = PC2(a, b, c, d); d = rs[d];
- pe[ 1 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 2 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 3 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 4 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 5 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 6 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 7 * 2] = PC2(d, a, b, c); c = rs[c];
- pe[ 8 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 9 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[10 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[11 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[12 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[13 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[14 * 2] = PC2(c, d, a, b); b = rs[b];
- pe[15 * 2] = PC2(b, c, d, a);
+ pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2] = DES_PC2(b, c, d, a);
/* Skip to next table set */
pt += 512;
@@ -747,22 +747,22 @@ static void dkey(u32 *pe, const u8 *k)
b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
- pe[ 0 * 2 + 1] = PC2(a, b, c, d); d = rs[d];
- pe[ 1 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 2 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 3 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 4 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 5 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 6 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 7 * 2 + 1] = PC2(d, a, b, c); c = rs[c];
- pe[ 8 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 9 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[10 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[11 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[12 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[13 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[14 * 2 + 1] = PC2(c, d, a, b); b = rs[b];
- pe[15 * 2 + 1] = PC2(b, c, d, a);
+ pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
for (d = 0; d < 16; ++d) {
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 247178cb98e..de3078215fe 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -24,12 +24,40 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kobject.h>
+#include <linux/cpu.h>
#include <crypto/pcrypt.h>
-static struct padata_instance *pcrypt_enc_padata;
-static struct padata_instance *pcrypt_dec_padata;
-static struct workqueue_struct *encwq;
-static struct workqueue_struct *decwq;
+struct padata_pcrypt {
+ struct padata_instance *pinst;
+ struct workqueue_struct *wq;
+
+ /*
+ * Cpumask for callback CPUs. It should be
+ * equal to the serial cpumask of the corresponding padata instance,
+ * so it is updated when padata notifies us about a serial
+ * cpumask change.
+ *
+ * cb_cpumask is protected by RCU. This fact prevents us from
+ * using cpumask_var_t directly because the actual type of
+ * cpumask_var_t depends on the kernel configuration (particularly on
+ * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
+ * cpumask_var_t may be either a pointer to the struct cpumask
+ * or a variable allocated on the stack. Thus we cannot safely use
+ * cpumask_var_t with RCU operations such as rcu_assign_pointer or
+ * rcu_dereference. So cpumask_var_t is wrapped with struct
+ * pcrypt_cpumask, which makes it possible to use it with RCU.
+ */
+ struct pcrypt_cpumask {
+ cpumask_var_t mask;
+ } *cb_cpumask;
+ struct notifier_block nblock;
+};
+
+static struct padata_pcrypt pencrypt;
+static struct padata_pcrypt pdecrypt;
+static struct kset *pcrypt_kset;
struct pcrypt_instance_ctx {
struct crypto_spawn spawn;
@@ -42,25 +70,32 @@ struct pcrypt_aead_ctx {
};
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
- struct padata_instance *pinst)
+ struct padata_pcrypt *pcrypt)
{
unsigned int cpu_index, cpu, i;
+ struct pcrypt_cpumask *cpumask;
cpu = *cb_cpu;
- if (cpumask_test_cpu(cpu, cpu_active_mask))
+ rcu_read_lock_bh();
+ cpumask = rcu_dereference(pcrypt->cb_cpumask);
+ if (cpumask_test_cpu(cpu, cpumask->mask))
+ goto out;
+
+ if (!cpumask_weight(cpumask->mask))
goto out;
- cpu_index = cpu % cpumask_weight(cpu_active_mask);
+ cpu_index = cpu % cpumask_weight(cpumask->mask);
- cpu = cpumask_first(cpu_active_mask);
+ cpu = cpumask_first(cpumask->mask);
for (i = 0; i < cpu_index; i++)
- cpu = cpumask_next(cpu, cpu_active_mask);
+ cpu = cpumask_next(cpu, cpumask->mask);
*cb_cpu = cpu;
out:
- return padata_do_parallel(pinst, padata, cpu);
+ rcu_read_unlock_bh();
+ return padata_do_parallel(pcrypt->pinst, padata, cpu);
}
static int pcrypt_aead_setkey(struct crypto_aead *parent,
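
pcrypt_do_parallel() above now consults an RCU-protected snapshot of padata's serial cpumask instead of cpu_active_mask; if the requested callback CPU is not in the mask, it falls back to a round-robin pick (cpu % cpumask_weight(), then walking the mask with cpumask_next()). The wrapper struct exists because, as the comment in struct padata_pcrypt explains, cpumask_var_t itself cannot be handed to rcu_assign_pointer()/rcu_dereference(). A condensed sketch of that publish/read pattern, with illustrative names:

    /* Condensed sketch of the RCU-swapped cpumask pattern; names are
     * illustrative. cpumask_var_t travels inside a heap-allocated
     * wrapper so the wrapper pointer can be RCU-managed. */
    struct mask_box {
        cpumask_var_t mask;     /* array or pointer, depending on config */
    };

    static struct mask_box *live_box;   /* protected by RCU */

    /* reader side: runs under rcu_read_lock_bh(), as pcrypt_do_parallel()
     * does */
    static bool cpu_allowed(int cpu)
    {
        struct mask_box *box;
        bool ret;

        rcu_read_lock_bh();
        box = rcu_dereference(live_box);
        ret = cpumask_test_cpu(cpu, box->mask);
        rcu_read_unlock_bh();
        return ret;
    }

    /* writer side: publish a fresh box, then retire the old one after
     * all _bh readers are done (synchronize_rcu_bh() in this era). */
    static void publish_new_mask(struct mask_box *fresh)
    {
        struct mask_box *old = live_box;

        rcu_assign_pointer(live_box, fresh);
        synchronize_rcu_bh();
        free_cpumask_var(old->mask);
        kfree(old);
    }
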
@@ -142,11 +177,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
req->cryptlen, req->iv);
aead_request_set_assoc(creq, req->assoc, req->assoclen);
- err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
- if (err)
- return err;
- else
- err = crypto_aead_encrypt(creq);
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+ if (!err)
+ return -EINPROGRESS;
return err;
}
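
With the reworked padata API, padata_do_parallel() returns 0 once the request has been queued, so pcrypt now maps success onto the asynchronous crypto convention: the caller receives -EINPROGRESS (or -EBUSY when the request landed on a backlog) and the real result is delivered later through the completion callback; the decrypt and givencrypt paths below are converted the same way. A sketch of how a caller consumes those return codes (the completion wiring here is illustrative):

    /* Sketch of the async return-code contract seen by pcrypt's users. */
    static int wait_async(int err, struct completion *done, int *async_err)
    {
        switch (err) {
        case 0:
            return 0;               /* finished synchronously */
        case -EINPROGRESS:          /* queued */
        case -EBUSY:                /* queued on the backlog */
            wait_for_completion(done);
            return *async_err;      /* filled in by the callback */
        default:
            return err;             /* immediate failure */
        }
    }
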
@@ -186,11 +219,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
req->cryptlen, req->iv);
aead_request_set_assoc(creq, req->assoc, req->assoclen);
- err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
- if (err)
- return err;
- else
- err = crypto_aead_decrypt(creq);
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
+ if (!err)
+ return -EINPROGRESS;
return err;
}
@@ -232,11 +263,9 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
aead_givcrypt_set_giv(creq, req->giv, req->seq);
- err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
- if (err)
- return err;
- else
- err = crypto_aead_givencrypt(creq);
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+ if (!err)
+ return -EINPROGRESS;
return err;
}
@@ -376,6 +405,115 @@ static void pcrypt_free(struct crypto_instance *inst)
kfree(inst);
}
+static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct padata_pcrypt *pcrypt;
+ struct pcrypt_cpumask *new_mask, *old_mask;
+ struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
+
+ if (!(val & PADATA_CPU_SERIAL))
+ return 0;
+
+ pcrypt = container_of(self, struct padata_pcrypt, nblock);
+ new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
+ if (!new_mask)
+ return -ENOMEM;
+ if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
+ kfree(new_mask);
+ return -ENOMEM;
+ }
+
+ old_mask = pcrypt->cb_cpumask;
+
+ cpumask_copy(new_mask->mask, cpumask->cbcpu);
+ rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
+ synchronize_rcu_bh();
+
+ free_cpumask_var(old_mask->mask);
+ kfree(old_mask);
+ return 0;
+}
+
+static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+{
+ int ret;
+
+ pinst->kobj.kset = pcrypt_kset;
+ ret = kobject_add(&pinst->kobj, NULL, name);
+ if (!ret)
+ kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
+ return ret;
+}
+
+static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
+ const char *name)
+{
+ int ret = -ENOMEM;
+ struct pcrypt_cpumask *mask;
+
+ get_online_cpus();
+
+ pcrypt->wq = create_workqueue(name);
+ if (!pcrypt->wq)
+ goto err;
+
+ pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
+ if (!pcrypt->pinst)
+ goto err_destroy_workqueue;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask)
+ goto err_free_padata;
+ if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
+ kfree(mask);
+ goto err_free_padata;
+ }
+
+ cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+ rcu_assign_pointer(pcrypt->cb_cpumask, mask);
+
+ pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
+ ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+ if (ret)
+ goto err_free_cpumask;
+
+ ret = pcrypt_sysfs_add(pcrypt->pinst, name);
+ if (ret)
+ goto err_unregister_notifier;
+
+ put_online_cpus();
+
+ return ret;
+
+err_unregister_notifier:
+ padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+err_free_cpumask:
+ free_cpumask_var(mask->mask);
+ kfree(mask);
+err_free_padata:
+ padata_free(pcrypt->pinst);
+err_destroy_workqueue:
+ destroy_workqueue(pcrypt->wq);
+err:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+{
+ kobject_put(&pcrypt->pinst->kobj);
+ free_cpumask_var(pcrypt->cb_cpumask->mask);
+ kfree(pcrypt->cb_cpumask);
+
+ padata_stop(pcrypt->pinst);
+ padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+ destroy_workqueue(pcrypt->wq);
+ padata_free(pcrypt->pinst);
+}
+
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.alloc = pcrypt_alloc,
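
pcrypt_init_padata() above acquires its resources in order (workqueue, padata instance, callback cpumask, cpumask notifier, sysfs node) and on failure unwinds with the usual kernel goto ladder: each acquisition adds one label, and each failure jumps to the label that releases everything obtained so far, in reverse order. The shape, reduced to two hypothetical resources:

    /* Generic shape of the goto-ladder error unwinding used above.
     * acquire_a/acquire_b/release_a are hypothetical helpers. */
    int acquire_a(void);
    int acquire_b(void);
    void release_a(void);

    static int setup_both(void)
    {
        int err;

        err = acquire_a();
        if (err)
            goto err_out;

        err = acquire_b();
        if (err)
            goto err_release_a;

        return 0;

    err_release_a:
        release_a();
    err_out:
        return err;
    }
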
@@ -385,52 +523,39 @@ static struct crypto_template pcrypt_tmpl = {
static int __init pcrypt_init(void)
{
- encwq = create_workqueue("pencrypt");
- if (!encwq)
- goto err;
-
- decwq = create_workqueue("pdecrypt");
- if (!decwq)
- goto err_destroy_encwq;
+ int err = -ENOMEM;
+ pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
+ if (!pcrypt_kset)
+ goto err;
- pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
- if (!pcrypt_enc_padata)
- goto err_destroy_decwq;
+ err = pcrypt_init_padata(&pencrypt, "pencrypt");
+ if (err)
+ goto err_unreg_kset;
- pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
- if (!pcrypt_dec_padata)
- goto err_free_padata;
+ err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
+ if (err)
+ goto err_deinit_pencrypt;
- padata_start(pcrypt_enc_padata);
- padata_start(pcrypt_dec_padata);
+ padata_start(pencrypt.pinst);
+ padata_start(pdecrypt.pinst);
return crypto_register_template(&pcrypt_tmpl);
-err_free_padata:
- padata_free(pcrypt_enc_padata);
-
-err_destroy_decwq:
- destroy_workqueue(decwq);
-
-err_destroy_encwq:
- destroy_workqueue(encwq);
-
+err_deinit_pencrypt:
+ pcrypt_fini_padata(&pencrypt);
+err_unreg_kset:
+ kset_unregister(pcrypt_kset);
err:
- return -ENOMEM;
+ return err;
}
static void __exit pcrypt_exit(void)
{
- padata_stop(pcrypt_enc_padata);
- padata_stop(pcrypt_dec_padata);
-
- destroy_workqueue(encwq);
- destroy_workqueue(decwq);
-
- padata_free(pcrypt_enc_padata);
- padata_free(pcrypt_dec_padata);
+ pcrypt_fini_padata(&pencrypt);
+ pcrypt_fini_padata(&pdecrypt);
+ kset_unregister(pcrypt_kset);
crypto_unregister_template(&pcrypt_tmpl);
}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c8aaa0cb0b..fa8c8f78c8d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -22,6 +22,17 @@
#include <crypto/rng.h>
#include "internal.h"
+
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/* a perfect nop */
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
+{
+ return 0;
+}
+
+#else
+
#include "testmgr.h"
/*
@@ -2530,4 +2541,7 @@ notest:
non_fips_alg:
return -EINVAL;
}
+
+#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c
index dfcda231f87..1f07b843e07 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
+MODULE_ALIAS("twofish");
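
Since the rename changes the module file from twofish.ko to twofish_generic.ko, the added MODULE_ALIAS keeps both `modprobe twofish` and the crypto API's name-based module auto-loading working. The general pattern for a renamed module (illustrative boilerplate, not from this patch):

    /* Illustrative: a renamed module stays reachable under its old name
     * by declaring that name as an alias. */
    MODULE_LICENSE("GPL");
    MODULE_DESCRIPTION("Example cipher, formerly built as oldname.ko");
    MODULE_ALIAS("oldname");    /* modprobe oldname loads this module */
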
diff --git a/crypto/xts.c b/crypto/xts.c
index d87b0f3102c..555ecaab1e5 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("xts", alg);
if (IS_ERR(inst))