Diffstat (limited to 'arch/s390/crypto')
-rw-r--r--  arch/s390/crypto/aes_s390.c    | 156
-rw-r--r--  arch/s390/crypto/crypt_s390.h  |   3
-rw-r--r--  arch/s390/crypto/crypto_des.h  |  18
-rw-r--r--  arch/s390/crypto/des_s390.c    | 124
-rw-r--r--  arch/s390/crypto/ghash_s390.c  |  22
-rw-r--r--  arch/s390/crypto/prng.c        |   2
-rw-r--r--  arch/s390/crypto/sha1_s390.c   |   2
-rw-r--r--  arch/s390/crypto/sha256_s390.c |   2
-rw-r--r--  arch/s390/crypto/sha_common.c  |   9
9 files changed, 201 insertions(+), 137 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a9ce135893f..23223cd63e5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
* s390 implementation of the AES Cipher Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2007
+ * Copyright IBM Corp. 2005, 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
* Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
*
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include "crypt_s390.h"
#define AES_KEYLEN_128 1
@@ -32,10 +33,10 @@
#define AES_KEYLEN_256 4
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;
struct s390_aes_ctx {
- u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
long enc;
long dec;
@@ -56,8 +57,7 @@ struct pcc_param {
struct s390_xts_ctx {
u8 key[32];
- u8 xts_param[16];
- struct pcc_param pcc;
+ u8 pcc_key[32];
long enc;
long dec;
int key_len;
@@ -216,7 +216,6 @@ static struct crypto_alg aes_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_u = {
@@ -326,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -398,7 +398,6 @@ static struct crypto_alg ecb_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_u = {
@@ -442,29 +441,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
struct blkcipher_walk *walk)
{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(param, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, sctx->key, sctx->key_len);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, param, AES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
out:
return ret;
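The rework of cbc_aes_crypt() above replaces the IV slot in the shared tfm context with a parameter block built on the stack for every request: KMC consumes one contiguous block of chaining value followed by key, and per-request copies keep the context read-only, so concurrent users of the same tfm no longer race on the IV. A minimal userspace sketch of that layout (kmc_param and kmc_param_init are illustrative names, not part of the patch):

#include <string.h>

#define AES_BLOCK_SIZE   16
#define AES_MAX_KEY_SIZE 32

/* KMC parameter block: chaining value first, key right behind it */
struct kmc_param {
	unsigned char iv[AES_BLOCK_SIZE];    /* updated by KMC as it runs */
	unsigned char key[AES_MAX_KEY_SIZE]; /* read-only cipher key */
};

static void kmc_param_init(struct kmc_param *p, const unsigned char *iv,
			   const unsigned char *key, unsigned int key_len)
{
	/* fresh copies per request; the shared context is never written */
	memcpy(p->iv, iv, AES_BLOCK_SIZE);
	memcpy(p->key, key, key_len);
}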
@@ -481,7 +487,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->enc, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +501,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->dec, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -508,7 +514,6 @@ static struct crypto_alg cbc_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_u = {
@@ -587,7 +592,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
case 48:
xts_ctx->enc = 0;
@@ -598,7 +603,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -617,29 +622,35 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
unsigned int nbytes = walk->nbytes;
unsigned int n;
u8 *in, *out;
- void *param;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
if (!nbytes)
goto out;
- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
- param = xts_ctx->pcc.key + offset;
- ret = crypt_s390_pcc(func, param);
- BUG_ON(ret < 0);
-
- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
- param = xts_ctx->key + offset;
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+ if (ret < 0)
+ return -EIO;
+
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
do {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON(ret < 0 || ret != n);
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
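The XTS hunk applies the same on-stack idea twice: one block for the PCC instruction, which derives the initial tweak, and one for the KM instruction, which does the bulk encryption. A sketch of the two layouts with field order as in the patch (illustrative declarations, not drop-in kernel structs):

/* PCC input/output block: computes the initial XTS tweak */
struct pcc_param_sketch {
	unsigned char key[32];   /* tweak key, from xts_ctx->pcc_key */
	unsigned char tweak[16]; /* IV taken from the walk */
	unsigned char block[16]; /* zeroed before the call */
	unsigned char bit[16];   /* zeroed before the call */
	unsigned char xts[16];   /* output: tweak for the first block */
};

/* KM parameter block in XTS mode */
struct xts_km_param_sketch {
	unsigned char key[32];   /* data key, from xts_ctx->key */
	unsigned char init[16];  /* tweak copied from pcc_param.xts */
};

The offset applied to both key fields skips their unused first half when a 128-bit key sits in a 32-byte slot, which is why both arrays are sized for the 256-bit case.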
@@ -710,7 +721,6 @@ static struct crypto_alg xts_aes_alg = {
.cra_ctxsize = sizeof(struct s390_xts_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(xts_aes_alg.cra_list),
.cra_init = xts_fallback_init,
.cra_exit = xts_fallback_exit,
.cra_u = {
@@ -725,6 +735,8 @@ static struct crypto_alg xts_aes_alg = {
}
};
+static int xts_aes_alg_reg;
+
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -748,42 +760,70 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[AES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
if (!walk->nbytes)
return ret;
- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= AES_BLOCK_SIZE) {
- /* only use complete blocks, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
+ return -EIO;
}
- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- BUG_ON(ret < 0 || ret != n);
if (n > AES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -791,13 +831,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+ AES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != AES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
return ret;
}
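The trylock dance is the heart of the CTR rework: the preallocated page ctrblk lets KMCTR consume up to PAGE_SIZE of precomputed counter blocks per call, but it is a single shared buffer. A request that loses the race falls back to a one-block stack buffer and processes one block per call instead of sleeping, so throughput degrades gracefully under contention, and walk->iv is written back only from whichever buffer was actually used. A userspace sketch of the pattern, with a pthread mutex standing in for the kernel spinlock (illustrative names):

#include <pthread.h>

#define PAGE_SZ 4096

static unsigned char ctrblk[PAGE_SZ]; /* shared counter page */
static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

/* pick the counter buffer for this request */
static unsigned char *ctr_acquire(unsigned char *local_block)
{
	/* fast path: become sole owner of the big shared page */
	if (pthread_mutex_trylock(&ctrblk_lock) == 0)
		return ctrblk;
	/* contended: fall back to one block on the caller's stack */
	return local_block;
}

static void ctr_release(const unsigned char *ctrptr)
{
	if (ctrptr == ctrblk)
		pthread_mutex_unlock(&ctrblk_lock);
}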
@@ -832,7 +874,6 @@ static struct crypto_alg ctr_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ctr_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -845,6 +886,8 @@ static struct crypto_alg ctr_aes_alg = {
}
};
+static int ctr_aes_alg_reg;
+
static int __init aes_s390_init(void)
{
int ret;
@@ -883,6 +926,7 @@ static int __init aes_s390_init(void)
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
+ xts_aes_alg_reg = 1;
}
if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -901,6 +945,7 @@ static int __init aes_s390_init(void)
free_page((unsigned long) ctrblk);
goto ctr_aes_err;
}
+ ctr_aes_alg_reg = 1;
}
out:
@@ -920,9 +965,12 @@ aes_err:
static void __exit aes_s390_fini(void)
{
- crypto_unregister_alg(&ctr_aes_alg);
- free_page((unsigned long) ctrblk);
- crypto_unregister_alg(&xts_aes_alg);
+ if (ctr_aes_alg_reg) {
+ crypto_unregister_alg(&ctr_aes_alg);
+ free_page((unsigned long) ctrblk);
+ }
+ if (xts_aes_alg_reg)
+ crypto_unregister_alg(&xts_aes_alg);
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
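The xts_aes_alg_reg and ctr_aes_alg_reg flags close an init/exit asymmetry: XTS and CTR registration depends on which CPACF facilities the machine offers, so a blind unregister in aes_s390_fini() could hit an algorithm that was never registered. The idiom, in a self-contained sketch (all names below are hypothetical stand-ins; the stubs replace crypto_register_alg() and the facility probe):

#include <stdbool.h>

static bool xts_registered;

static bool facility_available(void) { return true; } /* stub */
static int register_alg(void) { return 0; }           /* stub */
static void unregister_alg(void) { }                  /* stub */

static void init_sketch(void)
{
	/* record exactly what succeeded... */
	if (facility_available() && register_alg() == 0)
		xts_registered = true;
}

static void exit_sketch(void)
{
	/* ...and tear down only that */
	if (xts_registered)
		unregister_alg();
}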
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index ffd1ac255f1..6c5cc6da711 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,7 +3,7 @@
*
* Support for s390 cryptographic instructions.
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
@@ -17,6 +17,7 @@
#define _CRYPTO_ARCH_S390_CRYPT_S390_H
#include <asm/errno.h>
+#include <asm/facility.h>
#define CRYPT_S390_OP_MASK 0xFF00
#define CRYPT_S390_FUNC_MASK 0x00FF
diff --git a/arch/s390/crypto/crypto_des.h b/arch/s390/crypto/crypto_des.h
deleted file mode 100644
index 6210457ceeb..00000000000
--- a/arch/s390/crypto/crypto_des.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Function for checking keys for the DES and Tripple DES Encryption
- * algorithms.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#ifndef __CRYPTO_DES_H__
-#define __CRYPTO_DES_H__
-
-extern int crypto_des_check_key(const u8*, unsigned int, u32*);
-
-#endif /*__CRYPTO_DES_H__*/
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index a52bfd124d8..7acb77f7ef1 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,7 +3,7 @@
*
* s390 implementation of the DES Cipher Algorithm.
*
- * Copyright IBM Corp. 2003,2011
+ * Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
@@ -25,6 +25,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
@@ -70,7 +71,6 @@ static struct crypto_alg des_alg = {
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
@@ -95,7 +95,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -105,28 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *iv, struct blkcipher_walk *walk)
+ struct blkcipher_walk *walk)
{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, iv, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ ret = crypt_s390_kmc(func, &param, out, in, n);
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -163,7 +171,6 @@ static struct crypto_alg ecb_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -206,7 +211,6 @@ static struct crypto_alg cbc_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@@ -238,9 +242,9 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
- if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
- memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE)) &&
+ if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
+ crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
+ DES_KEY_SIZE)) &&
(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
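The switch from memcmp() to crypto_memneq() in des3_setkey() is a timing fix: memcmp() may return at the first differing byte, so its runtime can leak where two key thirds diverge, while crypto_memneq() inspects every byte unconditionally. A simplified sketch of such a constant-time inequality test (the real kernel helper takes extra care to keep the compiler from reintroducing branches):

#include <stddef.h>

/* nonzero iff a and b differ; runtime independent of the contents */
static unsigned long ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	while (n--)
		neq |= *pa++ ^ *pb++; /* accumulate, never branch on data */
	return neq;
}

The check itself enforces K1 != K2 and K2 != K3 when CRYPTO_TFM_REQ_WEAK_KEY is set, rejecting 3DES keys that degenerate to single DES.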
@@ -271,7 +275,6 @@ static struct crypto_alg des3_alg = {
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_KEY_SIZE,
@@ -314,8 +317,6 @@ static struct crypto_alg ecb_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(
- ecb_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,
@@ -331,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -358,8 +357,6 @@ static struct crypto_alg cbc_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(
- cbc_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,
@@ -372,52 +369,83 @@ static struct crypto_alg cbc_des3_alg = {
}
};
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[DES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
+
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= DES_BLOCK_SIZE) {
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
+ return -EIO;
}
- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- BUG_ON((ret < 0) || (ret != n));
if (n > DES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
-
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+ DES_BLOCK_SIZE, ctrbuf);
+ if (ret < 0 || ret != DES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
return ret;
}
@@ -452,7 +480,6 @@ static struct crypto_alg ctr_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ctr_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@@ -496,7 +523,6 @@ static struct crypto_alg ctr_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ctr_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index b1bd170f24b..d43485d142e 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
- BUG_ON(ret != n);
+ if (ret != n)
+ return -EIO;
src += n;
srclen -= n;
}
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
dctx->bytes = 0;
+ return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ int ret;
- ghash_flush(ctx, dctx);
- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
- return 0;
+ ret = ghash_flush(ctx, dctx);
+ if (!ret)
+ memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+ return ret;
}
static struct shash_alg ghash_alg = {
@@ -135,7 +140,6 @@ static struct shash_alg ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
},
};
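Turning ghash_flush() from void into int is what allows ghash_final() above to refuse to emit a digest after a failed KIMD call. The flush itself only zero-pads the buffered tail to a full block and hashes it, along these lines (kimd_ghash is a stub standing in for crypt_s390_kimd):

#include <string.h>
#include <errno.h>

#define GHASH_BLOCK_SIZE 16

static int kimd_ghash(unsigned char *buf, unsigned int len)
{
	return (int)len; /* stub: the real code issues the KIMD instruction */
}

/* zero-pad the partial tail and hash it; returns 0 or -EIO */
static int ghash_flush_sketch(unsigned char *buffer, unsigned int *bytes)
{
	if (*bytes) {
		memset(buffer + *bytes, 0, GHASH_BLOCK_SIZE - *bytes);
		if (kimd_ghash(buffer, GHASH_BLOCK_SIZE) != GHASH_BLOCK_SIZE)
			return -EIO;
		*bytes = 0;
	}
	return 0;
}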
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 0808fbf0f7d..94a35a4c1b4 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2006,2007
+ * Copyright IBM Corp. 2006, 2007
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
* Driver for the s390 pseudo random number generator
*/
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index e9868c6e0a0..a1b3a9dc9d8 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,7 +8,7 @@
* implementation written by Steve Reid.
*
* s390 Version:
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 0317a3547cb..9b853809a49 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
* s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2011
+ * Copyright IBM Corp. 2005, 2011
* Author(s): Jan Glauber (jang@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index bd37d09b9d3..8620b0ec9c4 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
- BUG_ON(ret != bsize);
+ if (ret != bsize)
+ return -EIO;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
- BUG_ON(ret != (len & ~(bsize - 1)));
+ if (ret != (len & ~(bsize - 1)))
+ return -EIO;
data += ret;
len -= ret;
}
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
- BUG_ON(ret != end);
+ if (ret != end)
+ return -EIO;
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
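The final hunk touches the classic Merkle-Damgård finalization: the message length in bits goes into the last 8 bytes of the padded block, big-endian, before the closing KIMD call. A compact sketch of just that length encoding (illustrative; the kernel helper also places the 0x80 terminator and handles overflow into an extra block, which this omits):

#include <stdint.h>

/* store the 64-bit message length in bits, big-endian, at buf[end-8] */
static void sha_store_bitlen(unsigned char *buf, unsigned int end,
			     uint64_t count_bytes)
{
	uint64_t bits = count_bytes << 3; /* bytes -> bits */
	int i;

	for (i = 0; i < 8; i++)
		buf[end - 8 + i] = (unsigned char)(bits >> (56 - 8 * i));
}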