From fbf0ca1bf852fe224cec5400a69cd755ddc4ddcb Mon Sep 17 00:00:00 2001
From: Steffen Klassert
Date: Wed, 28 Mar 2012 08:51:03 +0200
Subject: crypto: pcrypt - Use the online cpumask as the default

We use the active cpumask to determine the superset of cpus to use for
parallelization. However, the active cpumask is for internal usage of the
scheduler and therefore not the appropriate cpumask for these purposes.
So use the online cpumask instead.

Reported-by: Peter Zijlstra
Signed-off-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 crypto/pcrypt.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'crypto')

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 29a89dad68b..b2c99dc1c5e 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -280,11 +280,11 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
 
 	ictx->tfm_count++;
 
-	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+	cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
 
-	ctx->cb_cpu = cpumask_first(cpu_active_mask);
+	ctx->cb_cpu = cpumask_first(cpu_online_mask);
 	for (cpu = 0; cpu < cpu_index; cpu++)
-		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
 
 	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
 
@@ -472,7 +472,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
 		goto err_free_padata;
 	}
 
-	cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
 	rcu_assign_pointer(pcrypt->cb_cpumask, mask);
 
 	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
--
cgit v1.2.3-18-g5258
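As a rough illustration (not part of the patch), the sketch below mirrors the round-robin callback-CPU selection that pcrypt_aead_init_tfm() now performs over cpu_online_mask instead of cpu_active_mask. The helper name pick_cb_cpu() and the tfm_count parameter are illustrative stand-ins for the inline logic and the per-instance counter ictx->tfm_count.

/*
 * Illustrative sketch only -- not taken from the patch.  Picks the
 * tfm_count-th CPU (round-robin) from the set of currently online CPUs,
 * the same walk the hunk above switches to cpu_online_mask.
 */
#include <linux/cpumask.h>

static unsigned int pick_cb_cpu(unsigned int tfm_count)
{
	unsigned int cpu, cpu_index, cb_cpu;

	/* Index into the set of CPUs that are currently online. */
	cpu_index = tfm_count % cpumask_weight(cpu_online_mask);

	/* Walk from the first online CPU to the cpu_index-th one. */
	cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		cb_cpu = cpumask_next(cb_cpu, cpu_online_mask);

	return cb_cpu;
}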
From 1e1229940045a537c61fb69f86010a8774e576d0 Mon Sep 17 00:00:00 2001
From: Steffen Klassert
Date: Thu, 29 Mar 2012 09:03:47 +0200
Subject: crypto: user - Fix lookup of algorithms with IV generator

We look up algorithms with crypto_alg_mod_lookup() when instantiating via
crypto_add_alg(). However, algorithms that are wrapped by an IV generator
(e.g. aead or geniv type algorithms) need special care. The userspace
process hangs until it gets a timeout when we use crypto_alg_mod_lookup()
to look up these algorithms. So export the lookup functions for these
algorithms and use them in crypto_add_alg().

Signed-off-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 crypto/ablkcipher.c  |  4 +--
 crypto/aead.c        |  4 +--
 crypto/crypto_user.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 75 insertions(+), 5 deletions(-)

(limited to 'crypto')

diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a0f768c1d9a..8d3a056ebee 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -613,8 +613,7 @@ out:
 	return err;
 }
 
-static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
-						 u32 mask)
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
 {
 	struct crypto_alg *alg;
 
@@ -652,6 +651,7 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
 
 	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
 
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
 			 u32 type, u32 mask)
diff --git a/crypto/aead.c b/crypto/aead.c
index 04add3dca6f..e4cb35159be 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -470,8 +470,7 @@ out:
 	return err;
 }
 
-static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
-					     u32 mask)
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
 {
 	struct crypto_alg *alg;
 
@@ -503,6 +502,7 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
 
 	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
 }
+EXPORT_SYMBOL_GPL(crypto_lookup_aead);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask)
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f76e42bcc6e..e91c16111ed 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -21,9 +21,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
+
 #include "internal.h"
 
 DEFINE_MUTEX(crypto_cfg_mutex);
@@ -301,6 +305,60 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	return crypto_unregister_instance(alg);
 }
 
+static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
+						   u32 mask)
+{
+	int err;
+	struct crypto_alg *alg;
+
+	type = crypto_skcipher_type(type);
+	mask = crypto_skcipher_mask(mask);
+
+	for (;;) {
+		alg = crypto_lookup_skcipher(name, type, mask);
+		if (!IS_ERR(alg))
+			return alg;
+
+		err = PTR_ERR(alg);
+		if (err != -EAGAIN)
+			break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+
+static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+					       u32 mask)
+{
+	int err;
+	struct crypto_alg *alg;
+
+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type |= CRYPTO_ALG_TYPE_AEAD;
+	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	mask |= CRYPTO_ALG_TYPE_MASK;
+
+	for (;;) {
+		alg = crypto_lookup_aead(name, type, mask);
+		if (!IS_ERR(alg))
+			return alg;
+
+		err = PTR_ERR(alg);
+		if (err != -EAGAIN)
+			break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct nlattr **attrs)
 {
@@ -325,7 +383,19 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	else
 		name = p->cru_name;
 
-	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
+		break;
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+	case CRYPTO_ALG_TYPE_BLKCIPHER:
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
+		break;
+	default:
+		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+	}
+
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
--
cgit v1.2.3-18-g5258
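The path fixed here is driven from userspace over NETLINK_CRYPTO. The sketch below is a rough usage illustration, not part of the patch: it sends a CRYPTO_MSG_NEWALG request, which crypto_add_alg() serves. The algorithm name is only an example of an AEAD built from templates, and the two type constants are copied locally because the kernel's linux/crypto.h is not a userspace header.

/*
 * Rough userspace usage sketch -- not taken from the patch.  Asks the
 * kernel to instantiate an algorithm via CRYPTO_MSG_NEWALG; with the
 * AEAD type bits set, crypto_add_alg() routes the lookup through
 * crypto_user_aead_alg().  Error handling is kept minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

/* Values as defined in the kernel's <linux/crypto.h> (not exported to userspace). */
#define CRYPTO_ALG_TYPE_MASK	0x0000000f
#define CRYPTO_ALG_TYPE_AEAD	0x00000003

int main(void)
{
	struct {
		struct nlmsghdr hdr;
		struct crypto_user_alg alg;
	} req;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
	int fd;

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.hdr.nlmsg_type = CRYPTO_MSG_NEWALG;
	req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	/* Example AEAD name; any template-wrapped name would do. */
	snprintf(req.alg.cru_name, sizeof(req.alg.cru_name),
		 "pcrypt(authenc(hmac(sha1),cbc(aes)))");
	req.alg.cru_type = CRYPTO_ALG_TYPE_AEAD;
	req.alg.cru_mask = CRYPTO_ALG_TYPE_MASK;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0) {
		perror("socket(NETLINK_CRYPTO)");
		return 1;
	}

	if (sendto(fd, &req, req.hdr.nlmsg_len, 0,
		   (struct sockaddr *)&nl, sizeof(nl)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

With cru_type and cru_mask left at zero, the request falls through to the default crypto_alg_mod_lookup() branch of the switch instead.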
From 5219a5342ab13650ae0f0c62319407268c48d0ab Mon Sep 17 00:00:00 2001
From: Steffen Klassert
Date: Thu, 29 Mar 2012 09:04:46 +0200
Subject: crypto: user - Fix size of netlink dump message

The default netlink message size limit might be exceeded when dumping a
lot of algorithms to userspace. As a result, not all of the instantiated
algorithms are dumped to userspace. So calculate an upper bound on the
message size and call netlink_dump_start() with that value.

Signed-off-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 crypto/crypto_user.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'crypto')

diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index e91c16111ed..f1ea0a06413 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -457,12 +457,20 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
 	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
+		struct crypto_alg *alg;
+		u16 dump_alloc = 0;
+
 		if (link->dump == NULL)
 			return -EINVAL;
+
+		list_for_each_entry(alg, &crypto_alg_list, cra_list)
+			dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
 		{
 			struct netlink_dump_control c = {
 				.dump = link->dump,
 				.done = link->done,
+				.min_dump_alloc = dump_alloc,
 			};
 			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
 		}
--
cgit v1.2.3-18-g5258
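For illustration only (not part of the patch), the userspace sketch below exercises the dump whose per-message allocation the change above bounds with min_dump_alloc: it sends a CRYPTO_MSG_GETALG request with NLM_F_DUMP and reads the multipart reply until NLMSG_DONE. Error handling is kept minimal.

/*
 * Rough userspace usage sketch -- not taken from the patch.  Dumps the
 * registered algorithms over NETLINK_CRYPTO and prints each entry's
 * cru_name and cru_driver_name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr hdr;
		struct crypto_user_alg alg;
	} req;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
	char buf[65536];	/* one multipart chunk at a time */
	int fd, done = 0;

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.hdr.nlmsg_type = CRYPTO_MSG_GETALG;
	req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0 || sendto(fd, &req, req.hdr.nlmsg_len, 0,
			     (struct sockaddr *)&nl, sizeof(nl)) < 0) {
		perror("crypto netlink");
		return 1;
	}

	while (!done) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		for (; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) {
			struct crypto_user_alg *ualg = NLMSG_DATA(nh);

			if (nh->nlmsg_type == NLMSG_DONE ||
			    nh->nlmsg_type == NLMSG_ERROR) {
				done = 1;
				break;
			}
			printf("%s (%s)\n", ualg->cru_name, ualg->cru_driver_name);
		}
	}

	close(fd);
	return 0;
}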