Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig            44
-rw-r--r--  crypto/Makefile            5
-rw-r--r--  crypto/ablkcipher.c       19
-rw-r--r--  crypto/aead.c             16
-rw-r--r--  crypto/ahash.c             2
-rw-r--r--  crypto/algapi.c            6
-rw-r--r--  crypto/algboss.c          20
-rw-r--r--  crypto/ansi_cprng.c       17
-rw-r--r--  crypto/api.c              52
-rw-r--r--  crypto/authenc.c          24
-rw-r--r--  crypto/blkcipher.c         4
-rw-r--r--  crypto/ccm.c               2
-rw-r--r--  crypto/chainiv.c           3
-rw-r--r--  crypto/cryptd.c          237
-rw-r--r--  crypto/crypto_wq.c        38
-rw-r--r--  crypto/gf128mul.c          2
-rw-r--r--  crypto/internal.h          6
-rw-r--r--  crypto/lrw.c               8
-rw-r--r--  crypto/pcompress.c        97
-rw-r--r--  crypto/scatterwalk.c       3
-rw-r--r--  crypto/sha256_generic.c    2
-rw-r--r--  crypto/shash.c            27
-rw-r--r--  crypto/tcrypt.c            6
-rw-r--r--  crypto/testmgr.c         198
-rw-r--r--  crypto/testmgr.h         147
-rw-r--r--  crypto/zlib.c            378
26 files changed, 1192 insertions, 171 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8dde4fcf99c..74d0e622a51 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -56,6 +56,7 @@ config CRYPTO_BLKCIPHER2
tristate
select CRYPTO_ALGAPI2
select CRYPTO_RNG2
+ select CRYPTO_WORKQUEUE
config CRYPTO_HASH
tristate
@@ -75,6 +76,10 @@ config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
+config CRYPTO_PCOMP
+ tristate
+ select CRYPTO_ALGAPI2
+
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -87,6 +92,7 @@ config CRYPTO_MANAGER2
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
+ select CRYPTO_PCOMP
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -106,11 +112,15 @@ config CRYPTO_NULL
help
These are 'Null' algorithms, used by IPsec, which do nothing.
+config CRYPTO_WORKQUEUE
+ tristate
+
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
+ select CRYPTO_WORKQUEUE
help
This is a generic software asynchronous crypto daemon that
converts an arbitrary synchronous software crypto algorithm
@@ -470,6 +480,31 @@ config CRYPTO_AES_X86_64
See <http://csrc.nist.gov/encryption/aes/> for more information.
+config CRYPTO_AES_NI_INTEL
+ tristate "AES cipher algorithms (AES-NI)"
+ depends on (X86 || UML_X86) && 64BIT
+ select CRYPTO_AES_X86_64
+ select CRYPTO_CRYPTD
+ select CRYPTO_ALGAPI
+ help
+	  Use the Intel AES-NI instructions for the AES algorithm.
+
+ AES cipher algorithms (FIPS-197). AES uses the Rijndael
+ algorithm.
+
+ Rijndael appears to be consistently a very good performer in
+ both hardware and software across a wide range of computing
+ environments regardless of its use in feedback or non-feedback
+ modes. Its key setup time is excellent, and its key agility is
+ good. Rijndael's very low memory requirements make it very well
+ suited for restricted-space environments, in which it also
+ demonstrates excellent performance. Rijndael's operations are
+ among the easiest to defend against power and timing attacks.
+
+ The AES specifies three key sizes: 128, 192 and 256 bits
+
+ See <http://csrc.nist.gov/encryption/aes/> for more information.
+
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
select CRYPTO_ALGAPI
@@ -714,6 +749,15 @@ config CRYPTO_DEFLATE
You will most probably want this if using IPSec.
+config CRYPTO_ZLIB
+ tristate "Zlib compression algorithm"
+ select CRYPTO_PCOMP
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ select NLATTR
+ help
+ This is the zlib algorithm.
+
config CRYPTO_LZO
tristate "LZO compression algorithm"
select CRYPTO_ALGAPI
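
Editor's note (not part of the patch): the new CRYPTO_AES_NI_INTEL entry selects CRYPTO_CRYPTD because the AES-NI glue is expected to hide its synchronous cipher behind the cryptd daemon. A minimal sketch of that helper interface, using only the functions this series exports from crypto/cryptd.c (cryptd_alloc_ablkcipher, cryptd_ablkcipher_child, cryptd_free_ablkcipher); the "ecb(aes)" name is purely illustrative:

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	static int example_cryptd_wrap(void)
	{
		struct cryptd_ablkcipher *tfm;
		struct crypto_blkcipher *child;

		/* Instantiates "cryptd(ecb(aes))" via the cryptd template. */
		tfm = cryptd_alloc_ablkcipher("ecb(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* The synchronous transform doing the real work stays reachable. */
		child = cryptd_ablkcipher_child(tfm);
		(void)child;

		cryptd_free_ablkcipher(tfm);
		return 0;
	}
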
diff --git a/crypto/Makefile b/crypto/Makefile
index 46b08bf2035..673d9f7c1bd 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-objs := api.o cipher.o digest.o compress.o
+obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
+
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -25,6 +27,8 @@ crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
+obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+
cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
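
Editor's note (not part of the patch): the new pcompress.o and zlib.o objects provide a partial (streaming) compression frontend. A rough sketch of how it is meant to be driven, assuming the interface added alongside this series in include/crypto/compress.h (crypto_alloc_pcomp, crypto_compress_setup/init/update/final, struct comp_request); error handling is trimmed:

	#include <crypto/compress.h>
	#include <linux/err.h>

	static int example_pcomp_deflate(const void *src, unsigned int slen,
					 void *dst, unsigned int dlen,
					 void *params, unsigned int params_len)
	{
		struct crypto_pcomp *tfm;
		struct comp_request req;
		int err;

		tfm = crypto_alloc_pcomp("zlib", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* params is an algorithm-specific blob; zlib takes netlink
		 * attributes (e.g. the compression level), hence NLATTR. */
		err = crypto_compress_setup(tfm, params, params_len);
		if (!err)
			err = crypto_compress_init(tfm);

		req.next_in = src;
		req.avail_in = slen;
		req.next_out = dst;
		req.avail_out = dlen;

		/* Unlike the old compress API, input can be fed in pieces. */
		if (!err)
			err = crypto_compress_update(tfm, &req);
		if (err >= 0)
			err = crypto_compress_final(tfm, &req);

		crypto_free_pcomp(tfm);
		return err < 0 ? err : 0;
	}
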
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 94140b3756f..e11ce37c710 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -282,6 +282,25 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
alg->cra_ablkcipher.ivsize))
return alg;
+ crypto_mod_put(alg);
+ alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+ mask & ~CRYPTO_ALG_TESTED);
+ if (IS_ERR(alg))
+ return alg;
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_GIVCIPHER) {
+ if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+ crypto_mod_put(alg);
+ alg = ERR_PTR(-ENOENT);
+ }
+ return alg;
+ }
+
+ BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+ alg->cra_ablkcipher.ivsize));
+
return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
diff --git a/crypto/aead.c b/crypto/aead.c
index 3a6f3f52c7c..d9aa733db16 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -422,6 +422,22 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
if (!alg->cra_aead.ivsize)
return alg;
+ crypto_mod_put(alg);
+ alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
+ mask & ~CRYPTO_ALG_TESTED);
+ if (IS_ERR(alg))
+ return alg;
+
+ if (alg->cra_type == &crypto_aead_type) {
+ if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+ crypto_mod_put(alg);
+ alg = ERR_PTR(-ENOENT);
+ }
+ return alg;
+ }
+
+ BUG_ON(!alg->cra_aead.ivsize);
+
return ERR_PTR(crypto_nivaead_default(alg, type, mask));
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ba5292d69eb..b2d1ee32cfe 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -214,7 +214,7 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
+ seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
}
const struct crypto_type crypto_ahash_type = {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 7c41e7405c4..56c62e2858d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -149,6 +149,9 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (q == alg)
goto err;
+ if (crypto_is_moribund(q))
+ continue;
+
if (crypto_is_larval(q)) {
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
goto err;
@@ -197,7 +200,7 @@ void crypto_alg_tested(const char *name, int err)
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!crypto_is_larval(q))
+ if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
test = (struct crypto_larval *)q;
@@ -210,6 +213,7 @@ void crypto_alg_tested(const char *name, int err)
goto unlock;
found:
+ q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (err || list_empty(&alg->cra_list))
goto complete;
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 4601e4267c8..6906f92aeac 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -10,7 +10,7 @@
*
*/
-#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -206,8 +206,7 @@ static int cryptomgr_test(void *data)
u32 type = param->type;
int err = 0;
- if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
- CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
+ if (type & CRYPTO_ALG_TESTED)
goto skiptest;
err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
@@ -223,6 +222,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
{
struct task_struct *thread;
struct crypto_test_param *param;
+ u32 type;
if (!try_module_get(THIS_MODULE))
goto err;
@@ -233,7 +233,19 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
- param->type = alg->cra_flags;
+ type = alg->cra_flags;
+
+ /* This piece of crap needs to disappear into per-type test hooks. */
+ if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+ CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+ ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+ alg->cra_ablkcipher.ivsize)) ||
+ (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
+ alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+ type |= CRYPTO_ALG_TESTED;
+
+ param->type = type;
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
if (IS_ERR(thread))
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 0fac8ffc2fb..d80ed4c1e00 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -132,9 +132,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data,
DEFAULT_BLK_SZ)) {
+ if (fips_enabled) {
+ panic("cprng %p Failed repetition check!\n",
+ ctx);
+ }
+
printk(KERN_ERR
"ctx %p Failed repetition check!\n",
ctx);
+
ctx->flags |= PRNG_NEED_RESET;
return -EINVAL;
}
@@ -338,7 +344,16 @@ static int cprng_init(struct crypto_tfm *tfm)
spin_lock_init(&ctx->prng_lock);
- return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL);
+ if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
+ return -EINVAL;
+
+ /*
+ * after allocation, we should always force the user to reset
+ * so they don't inadvertently use the insecure default values
+ * without specifying them intentionally
+ */
+ ctx->flags |= PRNG_NEED_RESET;
+ return 0;
}
static void cprng_exit(struct crypto_tfm *tfm)
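
Editor's note (not part of the patch): with PRNG_NEED_RESET now set at allocation time, a freshly created cprng refuses to emit output until it has been reseeded. A caller is therefore expected to look roughly like this sketch against the existing <crypto/rng.h> interface:

	#include <crypto/rng.h>
	#include <linux/err.h>

	static int example_get_random(u8 *out, unsigned int len,
				      u8 *seed, unsigned int slen)
	{
		struct crypto_rng *rng;
		int err;

		rng = crypto_alloc_rng("ansi_cprng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		/* Mandatory: the context is created with PRNG_NEED_RESET set. */
		err = crypto_rng_reset(rng, seed, slen);
		if (!err)
			err = crypto_rng_get_bytes(rng, out, len);

		crypto_free_rng(rng);
		return err < 0 ? err : 0;
	}
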
diff --git a/crypto/api.c b/crypto/api.c
index 9975a7bd246..314dab96840 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
type &= mask;
- alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
- name);
+ alg = crypto_alg_lookup(name, type, mask);
+ if (!alg) {
+ char tmp[CRYPTO_MAX_ALG_NAME];
+
+ request_module(name);
+
+ if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+ snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+ request_module(tmp);
+
+ alg = crypto_alg_lookup(name, type, mask);
+ }
+
if (alg)
return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
@@ -244,7 +255,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
struct crypto_alg *larval;
int ok;
- if (!(mask & CRYPTO_ALG_TESTED)) {
+ if (!((type | mask) & CRYPTO_ALG_TESTED)) {
type |= CRYPTO_ALG_TESTED;
mask |= CRYPTO_ALG_TESTED;
}
@@ -453,8 +464,8 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+void *crypto_create_tfm(struct crypto_alg *alg,
+ const struct crypto_type *frontend)
{
char *mem;
struct crypto_tfm *tfm = NULL;
@@ -488,9 +499,9 @@ out_free_tfm:
crypto_shoot_alg(alg);
kfree(mem);
out_err:
- tfm = ERR_PTR(err);
+ mem = ERR_PTR(err);
out:
- return tfm;
+ return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);
@@ -514,12 +525,11 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
*
* In case of error the return value is an error pointer.
*/
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend,
- u32 type, u32 mask)
+void *crypto_alloc_tfm(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask)
{
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
- struct crypto_tfm *tfm;
+ void *tfm;
int err;
type &= frontend->maskclear;
@@ -557,34 +567,34 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-
+
/*
- * crypto_free_tfm - Free crypto transform
+ * crypto_destroy_tfm - Free crypto transform
+ * @mem: Start of tfm slab
* @tfm: Transform to free
*
- * crypto_free_tfm() frees up the transform and any associated resources,
+ * This function frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
-void crypto_free_tfm(struct crypto_tfm *tfm)
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
int size;
- if (unlikely(!tfm))
+ if (unlikely(!mem))
return;
alg = tfm->__crt_alg;
- size = sizeof(*tfm) + alg->cra_ctxsize;
+ size = ksize(mem);
if (!tfm->exit && alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
- memset(tfm, 0, size);
- kfree(tfm);
+ memset(mem, 0, size);
+ kfree(mem);
}
-
-EXPORT_SYMBOL_GPL(crypto_free_tfm);
+EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
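
Editor's note (not part of the patch): the crypto_free_tfm() -> crypto_destroy_tfm() change reflects that type-safe frontends now own the allocation, so they hand back both the start of the allocated object and the embedded struct crypto_tfm. A wrapper along these lines (a sketch of the call pattern frontends such as include/crypto/hash.h are expected to use) shows the intended pairing:

	#include <crypto/hash.h>

	/* Frontend-specific free helper: pass the enclosing object and its
	 * embedded crypto_tfm to crypto_destroy_tfm(). */
	static inline void example_free_shash(struct crypto_shash *tfm)
	{
		crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
	}
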
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 40b6e9ec9e3..5793b64c81a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -158,16 +158,19 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
dstp = sg_page(dst);
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
- sg_init_table(cipher, 2);
- sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, dst, vdst == iv + ivsize);
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ authenc_chain(cipher, dst, vdst == iv + ivsize);
+ dst = cipher;
+ }
cryptlen = req->cryptlen + ivsize;
- hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
+ hash = crypto_authenc_hash(req, flags, dst, cryptlen);
if (IS_ERR(hash))
return PTR_ERR(hash);
- scatterwalk_map_and_copy(hash, cipher, cryptlen,
+ scatterwalk_map_and_copy(hash, dst, cryptlen,
crypto_aead_authsize(authenc), 1);
return 0;
}
@@ -285,11 +288,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
srcp = sg_page(src);
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
- sg_init_table(cipher, 2);
- sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, src, vsrc == iv + ivsize);
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ authenc_chain(cipher, src, vsrc == iv + ivsize);
+ src = cipher;
+ }
- return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
+ return crypto_authenc_verify(req, src, cryptlen + ivsize);
}
static int crypto_authenc_decrypt(struct aead_request *req)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 4a7e65c4df4..90d26c91f4e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -124,6 +124,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
+err:
walk->total = nbytes;
walk->nbytes = nbytes;
@@ -132,7 +133,6 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
return blkcipher_walk_next(desc, walk);
}
-err:
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
if (walk->buffer != walk->page)
@@ -521,7 +521,7 @@ static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
int err;
type = crypto_skcipher_type(type);
- mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
+ mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
alg = crypto_alg_mod_lookup(name, type, mask);
if (IS_ERR(alg))
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 7cf7e5a6b78..c36d654cf56 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -266,6 +266,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
if (assoclen) {
pctx->ilen = format_adata(idata, assoclen);
get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
+ } else {
+ pctx->ilen = 0;
}
/* compute plaintext into mac */
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 7c37a497b86..ba200b07449 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -15,6 +15,7 @@
#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
+#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -133,7 +134,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
goto out;
}
- queued = schedule_work(&ctx->postponed);
+ queued = queue_work(kcrypto_wq, &ctx->postponed);
BUG_ON(!queued);
out:
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d29e06b350f..d14b22658d7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -12,30 +12,31 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
-struct cryptd_state {
- spinlock_t lock;
- struct mutex mutex;
+struct cryptd_cpu_queue {
struct crypto_queue queue;
- struct task_struct *task;
+ struct work_struct work;
+};
+
+struct cryptd_queue {
+ struct cryptd_cpu_queue *cpu_queue;
};
struct cryptd_instance_ctx {
struct crypto_spawn spawn;
- struct cryptd_state *state;
+ struct cryptd_queue *queue;
};
struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
crypto_completion_t complete;
};
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+ unsigned int max_cpu_qlen)
+{
+ int cpu;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+ if (!queue->cpu_queue)
+ return -ENOMEM;
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ }
+ return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+ int cpu;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ BUG_ON(cpu_queue->queue.qlen);
+ }
+ free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ struct crypto_async_request *request)
+{
+ int cpu, err;
+ struct cryptd_cpu_queue *cpu_queue;
+
+ cpu = get_cpu();
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+ queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+ put_cpu();
+
+ return err;
+}
+
+/* Called in workqueue context, do one unit of real crypto work (via
+ * req->complete) and reschedule itself if there is more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+ struct cryptd_cpu_queue *cpu_queue;
+ struct crypto_async_request *req, *backlog;
+
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging the crypto
+ * workqueue. preempt_disable/enable is used to prevent
+ * being preempted by cryptd_enqueue_request() */
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
+
+ if (!req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+ req->complete(req, 0);
+
+ if (cpu_queue->queue.qlen)
+ queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- return ictx->state;
+ return ictx->queue;
}
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
{
struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cryptd_state *state =
- cryptd_get_state(crypto_ablkcipher_tfm(tfm));
- int err;
+ struct cryptd_queue *queue;
+ queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
rctx->complete = req->base.complete;
req->base.complete = complete;
- spin_lock_bh(&state->lock);
- err = ablkcipher_enqueue_request(&state->queue, req);
- spin_unlock_bh(&state->lock);
-
- wake_up_process(state->task);
- return err;
+ return cryptd_enqueue_request(queue, &req->base);
}
static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_state *state = cryptd_get_state(tfm);
- int active;
-
- mutex_lock(&state->mutex);
- active = ablkcipher_tfm_in_queue(&state->queue,
- __crypto_ablkcipher_cast(tfm));
- mutex_unlock(&state->mutex);
-
- BUG_ON(active);
crypto_free_blkcipher(ctx->child);
}
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
- struct cryptd_state *state)
+ struct cryptd_queue *queue)
{
struct crypto_instance *inst;
struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
if (err)
goto out_free_inst;
- ctx->state = state;
+ ctx->queue = queue;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
@@ -231,7 +291,7 @@ out_free_inst:
}
static struct crypto_instance *cryptd_alloc_blkcipher(
- struct rtattr **tb, struct cryptd_state *state)
+ struct rtattr **tb, struct cryptd_queue *queue)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
if (IS_ERR(alg))
return ERR_CAST(alg);
- inst = cryptd_alloc_instance(alg, state);
+ inst = cryptd_alloc_instance(alg, queue);
if (IS_ERR(inst))
goto out_put_alg;
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_state *state = cryptd_get_state(tfm);
- int active;
-
- mutex_lock(&state->mutex);
- active = ahash_tfm_in_queue(&state->queue,
- __crypto_ahash_cast(tfm));
- mutex_unlock(&state->mutex);
-
- BUG_ON(active);
crypto_free_hash(ctx->child);
}
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cryptd_state *state =
- cryptd_get_state(crypto_ahash_tfm(tfm));
- int err;
+ struct cryptd_queue *queue =
+ cryptd_get_queue(crypto_ahash_tfm(tfm));
rctx->complete = req->base.complete;
req->base.complete = complete;
- spin_lock_bh(&state->lock);
- err = ahash_enqueue_request(&state->queue, req);
- spin_unlock_bh(&state->lock);
-
- wake_up_process(state->task);
- return err;
+ return cryptd_enqueue_request(queue, &req->base);
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
}
static struct crypto_instance *cryptd_alloc_hash(
- struct rtattr **tb, struct cryptd_state *state)
+ struct rtattr **tb, struct cryptd_queue *queue)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
- inst = cryptd_alloc_instance(alg, state);
+ inst = cryptd_alloc_instance(alg, queue);
if (IS_ERR(inst))
goto out_put_alg;
@@ -502,7 +547,7 @@ out_put_alg:
return inst;
}
-static struct cryptd_state state;
+static struct cryptd_queue queue;
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_alloc_blkcipher(tb, &state);
+ return cryptd_alloc_blkcipher(tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_alloc_hash(tb, &state);
+ return cryptd_alloc_hash(tb, &queue);
}
return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
.module = THIS_MODULE,
};
-static inline int cryptd_create_thread(struct cryptd_state *state,
- int (*fn)(void *data), const char *name)
-{
- spin_lock_init(&state->lock);
- mutex_init(&state->mutex);
- crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
- state->task = kthread_run(fn, state, name);
- if (IS_ERR(state->task))
- return PTR_ERR(state->task);
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ablkcipher *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ablkcipher(tfm);
+ return ERR_PTR(-EINVAL);
+ }
- return 0;
+ return __cryptd_ablkcipher_cast(tfm);
}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
- BUG_ON(state->queue.qlen);
- kthread_stop(state->task);
+ struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+ return ctx->child;
}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
- struct cryptd_state *state = data;
- int stop;
-
- current->flags |= PF_NOFREEZE;
-
- do {
- struct crypto_async_request *req, *backlog;
-
- mutex_lock(&state->mutex);
- __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_bh(&state->lock);
- backlog = crypto_get_backlog(&state->queue);
- req = crypto_dequeue_request(&state->queue);
- spin_unlock_bh(&state->lock);
-
- stop = kthread_should_stop();
-
- if (stop || req) {
- __set_current_state(TASK_RUNNING);
- if (req) {
- if (backlog)
- backlog->complete(backlog,
- -EINPROGRESS);
- req->complete(req, 0);
- }
- }
-
- mutex_unlock(&state->mutex);
-
- schedule();
- } while (!stop);
-
- return 0;
+ crypto_free_ablkcipher(&tfm->base);
}
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
static int __init cryptd_init(void)
{
int err;
- err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+ err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);