Diffstat (limited to 'crypto')
41 files changed, 3202 insertions, 558 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 69ce573f122..ce4012a5878 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -174,9 +174,8 @@ config CRYPTO_TEST  	help  	  Quick & dirty crypto test module. -config CRYPTO_ABLK_HELPER_X86 +config CRYPTO_ABLK_HELPER  	tristate -	depends on X86  	select CRYPTO_CRYPTD  config CRYPTO_GLUE_HELPER_X86 @@ -492,14 +491,14 @@ config CRYPTO_SHA1  	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).  config CRYPTO_SHA1_SSSE3 -	tristate "SHA1 digest algorithm (SSSE3/AVX)" +	tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2)"  	depends on X86 && 64BIT  	select CRYPTO_SHA1  	select CRYPTO_HASH  	help  	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented  	  using Supplemental SSE3 (SSSE3) instructions or Advanced Vector -	  Extensions (AVX), when available. +	  Extensions (AVX/AVX2), when available.  config CRYPTO_SHA256_SSSE3  	tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2)" @@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL  	select CRYPTO_AES_X86_64 if 64BIT  	select CRYPTO_AES_586 if !64BIT  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_ALGAPI  	select CRYPTO_GLUE_HELPER_X86 if 64BIT  	select CRYPTO_LRW @@ -776,6 +775,22 @@ config CRYPTO_AES_ARM  	  See <http://csrc.nist.gov/encryption/aes/> for more information. +config CRYPTO_AES_ARM_BS +	tristate "Bit sliced AES using NEON instructions" +	depends on ARM && KERNEL_MODE_NEON +	select CRYPTO_ALGAPI +	select CRYPTO_AES_ARM +	select CRYPTO_ABLK_HELPER +	help +	  Use a faster and more secure NEON based implementation of AES in CBC, +	  CTR and XTS modes + +	  Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode +	  and for XTS mode encryption, CBC and XTS mode decryption speedup is +	  around 25%. (CBC encryption speed is not affected by this driver.) +	  This implementation does not rely on any lookup tables so it is +	  believed to be invulnerable to cache timing attacks. 
+  config CRYPTO_ANUBIS  	tristate "Anubis cipher algorithm"  	select CRYPTO_ALGAPI @@ -879,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64  	depends on CRYPTO  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_CAMELLIA_X86_64  	select CRYPTO_LRW @@ -901,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64  	depends on CRYPTO  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_CAMELLIA_X86_64  	select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 @@ -953,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_CAST_COMMON  	select CRYPTO_CAST5  	help @@ -976,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_CAST_COMMON  	select CRYPTO_CAST6 @@ -1094,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_SERPENT  	select CRYPTO_LRW @@ -1116,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586  	depends on X86 && !64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_SERPENT  	select CRYPTO_LRW @@ -1138,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_SERPENT  	select CRYPTO_LRW @@ -1160,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_SERPENT  	select CRYPTO_SERPENT_AVX_X86_64 @@ -1276,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64  	depends on X86 && 64BIT  	select CRYPTO_ALGAPI  	select CRYPTO_CRYPTD -	select CRYPTO_ABLK_HELPER_X86 +	select CRYPTO_ABLK_HELPER  	select CRYPTO_GLUE_HELPER_X86  	select CRYPTO_TWOFISH_COMMON  	select CRYPTO_TWOFISH_X86_64 @@ -1386,6 +1401,9 @@ config CRYPTO_USER_API_SKCIPHER  	  This option enables the user-spaces interface for symmetric  	  key cipher algorithms. 
+config CRYPTO_HASH_INFO +	bool +  source "drivers/crypto/Kconfig"  source crypto/asymmetric_keys/Kconfig diff --git a/crypto/Makefile b/crypto/Makefile index 80019ba8da3..38e64231dcd 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -3,7 +3,7 @@  #  obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o +crypto-y := api.o cipher.o compress.o memneq.o  obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o @@ -81,7 +81,7 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o  obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o  obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o  obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o -obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o +obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o  obj-$(CONFIG_CRYPTO_CRC32) += crc32.o  obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o  obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o @@ -104,3 +104,5 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o  obj-$(CONFIG_XOR_BLOCKS) += xor.o  obj-$(CONFIG_ASYNC_CORE) += async_tx/  obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ +obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o +obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c new file mode 100644 index 00000000000..ffe7278d4bd --- /dev/null +++ b/crypto/ablk_helper.c @@ -0,0 +1,150 @@ +/* + * Shared async block cipher helpers + * + * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> + * + * Based on aesni-intel_glue.c by: + *  Copyright (C) 2008, Intel Corp. + *    Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 + * USA + * + */ + +#include <linux/kernel.h> +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/hardirq.h> +#include <crypto/algapi.h> +#include <crypto/cryptd.h> +#include <crypto/ablk_helper.h> +#include <asm/simd.h> + +int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, +		 unsigned int key_len) +{ +	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); +	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; +	int err; + +	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); +	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) +				    & CRYPTO_TFM_REQ_MASK); +	err = crypto_ablkcipher_setkey(child, key, key_len); +	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) +				    & CRYPTO_TFM_RES_MASK); +	return err; +} +EXPORT_SYMBOL_GPL(ablk_set_key); + +int __ablk_encrypt(struct ablkcipher_request *req) +{ +	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); +	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); +	struct blkcipher_desc desc; + +	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); +	desc.info = req->info; +	desc.flags = 0; + +	return crypto_blkcipher_crt(desc.tfm)->encrypt( +		&desc, req->dst, req->src, req->nbytes); +} +EXPORT_SYMBOL_GPL(__ablk_encrypt); + +int ablk_encrypt(struct ablkcipher_request *req) +{ +	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); +	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + +	if (!may_use_simd()) { +		struct ablkcipher_request *cryptd_req = +			ablkcipher_request_ctx(req); + +		*cryptd_req = *req; +		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + +		return crypto_ablkcipher_encrypt(cryptd_req); +	} else { +		return __ablk_encrypt(req); +	} +} +EXPORT_SYMBOL_GPL(ablk_encrypt); + +int ablk_decrypt(struct ablkcipher_request *req) +{ +	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); +	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + +	if (!may_use_simd()) { +		struct ablkcipher_request *cryptd_req = +			ablkcipher_request_ctx(req); + +		*cryptd_req = *req; +		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + +		return crypto_ablkcipher_decrypt(cryptd_req); +	} else { +		struct blkcipher_desc desc; + +		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); +		desc.info = req->info; +		desc.flags = 0; + +		return crypto_blkcipher_crt(desc.tfm)->decrypt( +			&desc, req->dst, req->src, req->nbytes); +	} +} +EXPORT_SYMBOL_GPL(ablk_decrypt); + +void ablk_exit(struct crypto_tfm *tfm) +{ +	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); + +	cryptd_free_ablkcipher(ctx->cryptd_tfm); +} +EXPORT_SYMBOL_GPL(ablk_exit); + +int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) +{ +	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); +	struct cryptd_ablkcipher *cryptd_tfm; + +	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); +	if (IS_ERR(cryptd_tfm)) +		return PTR_ERR(cryptd_tfm); + +	ctx->cryptd_tfm = cryptd_tfm; +	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + +		crypto_ablkcipher_reqsize(&cryptd_tfm->base); + +	return 0; +} +EXPORT_SYMBOL_GPL(ablk_init_common); + +int ablk_init(struct crypto_tfm *tfm) +{ +	char drv_name[CRYPTO_MAX_ALG_NAME]; + +	snprintf(drv_name, sizeof(drv_name), "__driver-%s", +			
		crypto_tfm_alg_driver_name(tfm)); + +	return ablk_init_common(tfm, drv_name); +} +EXPORT_SYMBOL_GPL(ablk_init); + +MODULE_LICENSE("GPL"); diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 7d4a8d28277..40886c48990 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -16,9 +16,7 @@  #include <crypto/internal/skcipher.h>  #include <linux/cpumask.h>  #include <linux/err.h> -#include <linux/init.h>  #include <linux/kernel.h> -#include <linux/module.h>  #include <linux/rtnetlink.h>  #include <linux/sched.h>  #include <linux/slab.h> @@ -30,8 +28,6 @@  #include "internal.h" -static const char *skcipher_default_geniv __read_mostly; -  struct ablkcipher_buffer {  	struct list_head	entry;  	struct scatter_walk	dst; @@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)  	    alg->cra_blocksize)  		return "chainiv"; -	return alg->cra_flags & CRYPTO_ALG_ASYNC ? -	       "eseqiv" : skcipher_default_geniv; +	return "eseqiv";  }  static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) @@ -709,17 +704,3 @@ err:  	return ERR_PTR(err);  }  EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); - -static int __init skcipher_module_init(void) -{ -	skcipher_default_geniv = num_possible_cpus() > 1 ? -				 "eseqiv" : "chainiv"; -	return 0; -} - -static void skcipher_module_exit(void) -{ -} - -module_init(skcipher_module_init); -module_exit(skcipher_module_exit); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index ac33d5f3077..6a3ad801158 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -21,6 +21,7 @@  #include <linux/module.h>  #include <linux/net.h>  #include <linux/rwsem.h> +#include <linux/security.h>  struct alg_type_list {  	const struct af_alg_type *type; @@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)  	sock_init_data(newsock, sk2);  	sock_graft(sk2, newsock); +	security_sk_clone(sk, sk2);  	err = type->accept(ask->private, sk2);  	if (err) { @@ -434,7 +436,7 @@ int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)  	case -EINPROGRESS:  	case -EBUSY:  		wait_for_completion(&completion->completion); -		INIT_COMPLETION(completion->completion); +		reinit_completion(&completion->completion);  		err = completion->err;  		break;  	}; diff --git a/crypto/ahash.c b/crypto/ahash.c index 793a27f2493..f2a5d8f656f 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -15,6 +15,7 @@  #include <crypto/internal/hash.h>  #include <crypto/scatterwalk.h> +#include <linux/bug.h>  #include <linux/err.h>  #include <linux/kernel.h>  #include <linux/module.h> @@ -46,7 +47,10 @@ static int hash_walk_next(struct crypto_hash_walk *walk)  	unsigned int nbytes = min(walk->entrylen,  				  ((unsigned int)(PAGE_SIZE)) - offset); -	walk->data = kmap_atomic(walk->pg); +	if (walk->flags & CRYPTO_ALG_ASYNC) +		walk->data = kmap(walk->pg); +	else +		walk->data = kmap_atomic(walk->pg);  	walk->data += offset;  	if (offset & alignmask) { @@ -93,8 +97,16 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)  		return nbytes;  	} -	kunmap_atomic(walk->data); -	crypto_yield(walk->flags); +	if (walk->flags & CRYPTO_ALG_ASYNC) +		kunmap(walk->pg); +	else { +		kunmap_atomic(walk->data); +		/* +		 * The may sleep test only makes sense for sync users. +		 * Async users don't need to sleep here anyway. 
+		 */ +		crypto_yield(walk->flags); +	}  	if (err)  		return err; @@ -124,12 +136,31 @@ int crypto_hash_walk_first(struct ahash_request *req,  	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));  	walk->sg = req->src; -	walk->flags = req->base.flags; +	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;  	return hash_walk_new_entry(walk);  }  EXPORT_SYMBOL_GPL(crypto_hash_walk_first); +int crypto_ahash_walk_first(struct ahash_request *req, +			    struct crypto_hash_walk *walk) +{ +	walk->total = req->nbytes; + +	if (!walk->total) +		return 0; + +	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); +	walk->sg = req->src; +	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; +	walk->flags |= CRYPTO_ALG_ASYNC; + +	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC); + +	return hash_walk_new_entry(walk); +} +EXPORT_SYMBOL_GPL(crypto_ahash_walk_first); +  int crypto_hash_walk_first_compat(struct hash_desc *hdesc,  				  struct crypto_hash_walk *walk,  				  struct scatterlist *sg, unsigned int len) @@ -141,7 +172,7 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,  	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);  	walk->sg = sg; -	walk->flags = hdesc->flags; +	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;  	return hash_walk_new_entry(walk);  } @@ -190,6 +221,75 @@ static inline unsigned int ahash_align_buffer_size(unsigned len,  	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));  } +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) +{ +	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); +	unsigned long alignmask = crypto_ahash_alignmask(tfm); +	unsigned int ds = crypto_ahash_digestsize(tfm); +	struct ahash_request_priv *priv; + +	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), +		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? +		       GFP_KERNEL : GFP_ATOMIC); +	if (!priv) +		return -ENOMEM; + +	/* +	 * WARNING: Voodoo programming below! +	 * +	 * The code below is obscure and hard to understand, thus explanation +	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h +	 * to understand the layout of structures used here! +	 * +	 * The code here will replace portions of the ORIGINAL request with +	 * pointers to new code and buffers so the hashing operation can store +	 * the result in aligned buffer. We will call the modified request +	 * an ADJUSTED request. +	 * +	 * The newly mangled request will look as such: +	 * +	 * req { +	 *   .result        = ADJUSTED[new aligned buffer] +	 *   .base.complete = ADJUSTED[pointer to completion function] +	 *   .base.data     = ADJUSTED[*req (pointer to self)] +	 *   .priv          = ADJUSTED[new priv] { +	 *           .result   = ORIGINAL(result) +	 *           .complete = ORIGINAL(base.complete) +	 *           .data     = ORIGINAL(base.data) +	 *   } +	 */ + +	priv->result = req->result; +	priv->complete = req->base.complete; +	priv->data = req->base.data; +	/* +	 * WARNING: We do not backup req->priv here! The req->priv +	 *          is for internal use of the Crypto API and the +	 *          user must _NOT_ _EVER_ depend on it's content! +	 */ + +	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); +	req->base.complete = cplt; +	req->base.data = req; +	req->priv = priv; + +	return 0; +} + +static void ahash_restore_req(struct ahash_request *req) +{ +	struct ahash_request_priv *priv = req->priv; + +	/* Restore the original crypto request. 
*/ +	req->result = priv->result; +	req->base.complete = priv->complete; +	req->base.data = priv->data; +	req->priv = NULL; + +	/* Free the req->priv.priv from the ADJUSTED request. */ +	kzfree(priv); +} +  static void ahash_op_unaligned_finish(struct ahash_request *req, int err)  {  	struct ahash_request_priv *priv = req->priv; @@ -201,44 +301,37 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err)  		memcpy(priv->result, req->result,  		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); -	kzfree(priv); +	ahash_restore_req(req);  }  static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)  {  	struct ahash_request *areq = req->data; -	struct ahash_request_priv *priv = areq->priv; -	crypto_completion_t complete = priv->complete; -	void *data = priv->data; +	/* +	 * Restore the original request, see ahash_op_unaligned() for what +	 * goes where. +	 * +	 * The "struct ahash_request *req" here is in fact the "req.base" +	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it +	 * is a pointer to self, it is also the ADJUSTED "req" . +	 */ + +	/* First copy req->result into req->priv.result */  	ahash_op_unaligned_finish(areq, err); -	complete(data, err); +	/* Complete the ORIGINAL request. */ +	areq->base.complete(&areq->base, err);  }  static int ahash_op_unaligned(struct ahash_request *req,  			      int (*op)(struct ahash_request *))  { -	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); -	unsigned long alignmask = crypto_ahash_alignmask(tfm); -	unsigned int ds = crypto_ahash_digestsize(tfm); -	struct ahash_request_priv *priv;  	int err; -	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), -		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? -		       GFP_KERNEL : GFP_ATOMIC); -	if (!priv) -		return -ENOMEM; - -	priv->result = req->result; -	priv->complete = req->base.complete; -	priv->data = req->base.data; - -	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); -	req->base.complete = ahash_op_unaligned_done; -	req->base.data = req; -	req->priv = priv; +	err = ahash_save_req(req, ahash_op_unaligned_done); +	if (err) +		return err;  	err = op(req);  	ahash_op_unaligned_finish(req, err); @@ -287,19 +380,16 @@ static void ahash_def_finup_finish2(struct ahash_request *req, int err)  		memcpy(priv->result, req->result,  		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); -	kzfree(priv); +	ahash_restore_req(req);  }  static void ahash_def_finup_done2(struct crypto_async_request *req, int err)  {  	struct ahash_request *areq = req->data; -	struct ahash_request_priv *priv = areq->priv; -	crypto_completion_t complete = priv->complete; -	void *data = priv->data;  	ahash_def_finup_finish2(areq, err); -	complete(data, err); +	areq->base.complete(&areq->base, err);  }  static int ahash_def_finup_finish1(struct ahash_request *req, int err) @@ -319,38 +409,23 @@ out:  static void ahash_def_finup_done1(struct crypto_async_request *req, int err)  {  	struct ahash_request *areq = req->data; -	struct ahash_request_priv *priv = areq->priv; -	crypto_completion_t complete = priv->complete; -	void *data = priv->data;  	err = ahash_def_finup_finish1(areq, err); -	complete(data, err); +	areq->base.complete(&areq->base, err);  }  static int ahash_def_finup(struct ahash_request *req)  {  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); -	unsigned long alignmask = crypto_ahash_alignmask(tfm); -	unsigned int ds = crypto_ahash_digestsize(tfm); -	struct ahash_request_priv *priv; - -	priv = 
kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), -		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? -		       GFP_KERNEL : GFP_ATOMIC); -	if (!priv) -		return -ENOMEM; - -	priv->result = req->result; -	priv->complete = req->base.complete; -	priv->data = req->base.data; +	int err; -	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); -	req->base.complete = ahash_def_finup_done1; -	req->base.data = req; -	req->priv = priv; +	err = ahash_save_req(req, ahash_def_finup_done1); +	if (err) +		return err; -	return ahash_def_finup_finish1(req, tfm->update(req)); +	err = tfm->update(req); +	return ahash_def_finup_finish1(req, err);  }  static int ahash_no_export(struct ahash_request *req, void *out) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 0262210cad3..850246206b1 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,  	struct hash_ctx *ctx = ask->private;  	int err; +	if (flags & MSG_SENDPAGE_NOTLAST) +		flags |= MSG_MORE; +  	lock_sock(sk);  	sg_init_table(ctx->sgl.sg, 1);  	sg_set_page(ctx->sgl.sg, page, size, offset); @@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,  	else if (len < ds)  		msg->msg_flags |= MSG_TRUNC; -	msg->msg_namelen = 0; -  	lock_sock(sk);  	if (ctx->more) {  		ctx->more = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index a1c4f0a5558..a19c027b29b 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,  	struct skcipher_sg_list *sgl;  	int err = -EINVAL; +	if (flags & MSG_SENDPAGE_NOTLAST) +		flags |= MSG_MORE; +  	lock_sock(sk);  	if (!ctx->more && ctx->used)  		goto unlock; @@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,  	long copied = 0;  	lock_sock(sk); -	msg->msg_namelen = 0;  	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;  	     iovlen--, iov++) {  		unsigned long seglen = iov->iov_len; diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c0bb3778f1a..666f1962a16 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -230,11 +230,11 @@ remainder:  	 */  	if (byte_count < DEFAULT_BLK_SZ) {  empty_rbuf: -		for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; -			ctx->rand_data_valid++) { +		while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {  			*ptr = ctx->rand_data[ctx->rand_data_valid];  			ptr++;  			byte_count--; +			ctx->rand_data_valid++;  			if (byte_count == 0)  				goto done;  		} diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 6d2c2ea1255..03a6eb95ab5 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE  config ASYMMETRIC_PUBLIC_KEY_SUBTYPE  	tristate "Asymmetric public-key crypto algorithm subtype"  	select MPILIB +	select PUBLIC_KEY_ALGO_RSA +	select CRYPTO_HASH_INFO  	help  	  This option provides support for asymmetric public key type handling.  	  If signature generation and/or verification are to be used, @@ -20,8 +22,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE  config PUBLIC_KEY_ALGO_RSA  	tristate "RSA public-key algorithm" -	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE  	select MPILIB_EXTRA +	select MPILIB  	help  	  This option enables support for the RSA algorithm (PKCS#1, RFC3447). 
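The new crypto/ablk_helper.c above turns the former x86-only CRYPTO_ABLK_HELPER_X86 glue into generic code, which is what lets the ARM bit-sliced AES driver select CRYPTO_ABLK_HELPER in the Kconfig hunk further up. As a hedged sketch of how a consumer wires these helpers into an algorithm — the "cbc-aes-myarch" names, priority and module boilerplate below are illustrative assumptions, not part of this commit; aesni-intel and the new aes-arm-bs driver follow the same shape:

/*
 * Minimal sketch only: "cbc-aes-myarch" is a hypothetical driver name.
 * ablk_init() will ask cryptd for the matching "__driver-cbc-aes-myarch"
 * inner blkcipher, per the snprintf() in ablk_helper.c above.
 */
#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ablk_helper.h>

static struct crypto_alg ablk_cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-myarch",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,	/* allocates the cryptd fallback */
	.cra_exit		= ablk_exit,	/* frees it again */
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	},
};

Registered with crypto_register_alg() from the module's init path, ablk_encrypt() and ablk_decrypt() then run the inner "__driver-" blkcipher directly when may_use_simd() allows it and bounce the request to cryptd otherwise — exactly the branch visible in the file above.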
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index cf807654d22..b77eb530478 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = {  	.match		= asymmetric_key_match,  	.destroy	= asymmetric_key_destroy,  	.describe	= asymmetric_key_describe, +	.def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,  };  EXPORT_SYMBOL_GPL(key_type_asymmetric); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index cb2e29180a8..97eb001960b 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -22,29 +22,25 @@  MODULE_LICENSE("GPL"); -const char *const pkey_algo[PKEY_ALGO__LAST] = { +const char *const pkey_algo_name[PKEY_ALGO__LAST] = {  	[PKEY_ALGO_DSA]		= "DSA",  	[PKEY_ALGO_RSA]		= "RSA",  }; -EXPORT_SYMBOL_GPL(pkey_algo); +EXPORT_SYMBOL_GPL(pkey_algo_name); -const char *const pkey_hash_algo[PKEY_HASH__LAST] = { -	[PKEY_HASH_MD4]		= "md4", -	[PKEY_HASH_MD5]		= "md5", -	[PKEY_HASH_SHA1]	= "sha1", -	[PKEY_HASH_RIPE_MD_160]	= "rmd160", -	[PKEY_HASH_SHA256]	= "sha256", -	[PKEY_HASH_SHA384]	= "sha384", -	[PKEY_HASH_SHA512]	= "sha512", -	[PKEY_HASH_SHA224]	= "sha224", +const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = { +#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ +	defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) +	[PKEY_ALGO_RSA]		= &RSA_public_key_algorithm, +#endif  }; -EXPORT_SYMBOL_GPL(pkey_hash_algo); +EXPORT_SYMBOL_GPL(pkey_algo); -const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = { +const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = {  	[PKEY_ID_PGP]		= "PGP",  	[PKEY_ID_X509]		= "X509",  }; -EXPORT_SYMBOL_GPL(pkey_id_type); +EXPORT_SYMBOL_GPL(pkey_id_type_name);  /*   * Provide a part of a description of the key for /proc/keys. @@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key,  	if (key)  		seq_printf(m, "%s.%s", -			   pkey_id_type[key->id_type], key->algo->name); +			   pkey_id_type_name[key->id_type], key->algo->name);  }  /* @@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy);  /*   * Verify a signature using a public key.   
*/ -static int public_key_verify_signature(const struct key *key, -				       const struct public_key_signature *sig) +int public_key_verify_signature(const struct public_key *pk, +				const struct public_key_signature *sig)  { -	const struct public_key *pk = key->payload.data; +	const struct public_key_algorithm *algo; + +	BUG_ON(!pk); +	BUG_ON(!pk->mpi[0]); +	BUG_ON(!pk->mpi[1]); +	BUG_ON(!sig); +	BUG_ON(!sig->digest); +	BUG_ON(!sig->mpi[0]); + +	algo = pk->algo; +	if (!algo) { +		if (pk->pkey_algo >= PKEY_ALGO__LAST) +			return -ENOPKG; +		algo = pkey_algo[pk->pkey_algo]; +		if (!algo) +			return -ENOPKG; +	} -	if (!pk->algo->verify_signature) +	if (!algo->verify_signature)  		return -ENOTSUPP; -	if (sig->nr_mpi != pk->algo->n_sig_mpi) { +	if (sig->nr_mpi != algo->n_sig_mpi) {  		pr_debug("Signature has %u MPI not %u\n", -			 sig->nr_mpi, pk->algo->n_sig_mpi); +			 sig->nr_mpi, algo->n_sig_mpi);  		return -EINVAL;  	} -	return pk->algo->verify_signature(pk, sig); +	return algo->verify_signature(pk, sig); +} +EXPORT_SYMBOL_GPL(public_key_verify_signature); + +static int public_key_verify_signature_2(const struct key *key, +					 const struct public_key_signature *sig) +{ +	const struct public_key *pk = key->payload.data; +	return public_key_verify_signature(pk, sig);  }  /* @@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = {  	.name			= "public_key",  	.describe		= public_key_describe,  	.destroy		= public_key_destroy, -	.verify_signature	= public_key_verify_signature, +	.verify_signature	= public_key_verify_signature_2,  };  EXPORT_SYMBOL_GPL(public_key_subtype); diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h index 5e5e3562689..5c37a22a063 100644 --- a/crypto/asymmetric_keys/public_key.h +++ b/crypto/asymmetric_keys/public_key.h @@ -28,3 +28,9 @@ struct public_key_algorithm {  };  extern const struct public_key_algorithm RSA_public_key_algorithm; + +/* + * public_key.c + */ +extern int public_key_verify_signature(const struct public_key *pk, +				       const struct public_key_signature *sig); diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 4a6a0696f8a..459cf97a75e 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c @@ -13,6 +13,7 @@  #include <linux/module.h>  #include <linux/kernel.h>  #include <linux/slab.h> +#include <crypto/algapi.h>  #include "public_key.h"  MODULE_LICENSE("GPL"); @@ -73,13 +74,13 @@ static const struct {  	size_t size;  } RSA_ASN1_templates[PKEY_HASH__LAST] = {  #define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) } -	[PKEY_HASH_MD5]		= _(MD5), -	[PKEY_HASH_SHA1]	= _(SHA1), -	[PKEY_HASH_RIPE_MD_160]	= _(RIPE_MD_160), -	[PKEY_HASH_SHA256]	= _(SHA256), -	[PKEY_HASH_SHA384]	= _(SHA384), -	[PKEY_HASH_SHA512]	= _(SHA512), -	[PKEY_HASH_SHA224]	= _(SHA224), +	[HASH_ALGO_MD5]		= _(MD5), +	[HASH_ALGO_SHA1]	= _(SHA1), +	[HASH_ALGO_RIPE_MD_160]	= _(RIPE_MD_160), +	[HASH_ALGO_SHA256]	= _(SHA256), +	[HASH_ALGO_SHA384]	= _(SHA384), +	[HASH_ALGO_SHA512]	= _(SHA512), +	[HASH_ALGO_SHA224]	= _(SHA224),  #undef _  }; @@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,  		}  	} -	if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { +	if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {  		kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");  		return -EBADMSG;  	} -	if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { +	if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {  		
kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");  		return -EKEYREJECTED;  	} diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index facbf26bc6b..29893162497 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert)  		kfree(cert->subject);  		kfree(cert->fingerprint);  		kfree(cert->authority); +		kfree(cert->sig.digest); +		mpi_free(cert->sig.rsa.s);  		kfree(cert);  	}  } @@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,  		return -ENOPKG; /* Unsupported combination */  	case OID_md4WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_MD5; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	case OID_sha1WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_SHA1; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	case OID_sha256WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_SHA256; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	case OID_sha384WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_SHA384; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	case OID_sha512WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_SHA512; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	case OID_sha224WithRSAEncryption: -		ctx->cert->sig_hash_algo = PKEY_HASH_SHA224; -		ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; +		ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224; +		ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA;  		break;  	} @@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen,  		return -EINVAL;  	} -	ctx->cert->sig = value; -	ctx->cert->sig_size = vlen; +	ctx->cert->raw_sig = value; +	ctx->cert->raw_sig_size = vlen;  	return 0;  } @@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,  	if (ctx->last_oid != OID_rsaEncryption)  		return -ENOPKG; -	/* There seems to be an extraneous 0 byte on the front of the data */ -	ctx->cert->pkey_algo = PKEY_ALGO_RSA; +	ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA; + +	/* Discard the BIT STRING metadata */  	ctx->key = value + 1;  	ctx->key_size = vlen - 1;  	return 0; diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index f86dc5fcc4a..87d9cc26f63 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -9,6 +9,7 @@   * 2 of the Licence, or (at your option) any later version.   
*/ +#include <linux/time.h>  #include <crypto/public_key.h>  struct x509_certificate { @@ -20,13 +21,11 @@ struct x509_certificate {  	char		*authority;		/* Authority key fingerprint as hex */  	struct tm	valid_from;  	struct tm	valid_to; -	enum pkey_algo	pkey_algo : 8;		/* Public key algorithm */ -	enum pkey_algo	sig_pkey_algo : 8;	/* Signature public key algorithm */ -	enum pkey_hash_algo sig_hash_algo : 8;	/* Signature hash algorithm */  	const void	*tbs;			/* Signed data */ -	size_t		tbs_size;		/* Size of signed data */ -	const void	*sig;			/* Signature data */ -	size_t		sig_size;		/* Size of sigature */ +	unsigned	tbs_size;		/* Size of signed data */ +	unsigned	raw_sig_size;		/* Size of sigature */ +	const void	*raw_sig;		/* Signature data */ +	struct public_key_signature sig;	/* Signature parameters */  };  /* @@ -34,3 +33,10 @@ struct x509_certificate {   */  extern void x509_free_certificate(struct x509_certificate *cert);  extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); + +/* + * x509_public_key.c + */ +extern int x509_get_sig_params(struct x509_certificate *cert); +extern int x509_check_signature(const struct public_key *pub, +				struct x509_certificate *cert); diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 06007f0e880..382ef0d2ff2 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -23,82 +23,84 @@  #include "public_key.h"  #include "x509_parser.h" -static const -struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = { -	[PKEY_ALGO_DSA]		= NULL, -#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ -	defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) -	[PKEY_ALGO_RSA]		= &RSA_public_key_algorithm, -#endif -}; -  /* - * Check the signature on a certificate using the provided public key + * Set up the signature parameters in an X.509 certificate.  This involves + * digesting the signed data and extracting the signature.   */ -static int x509_check_signature(const struct public_key *pub, -				const struct x509_certificate *cert) +int x509_get_sig_params(struct x509_certificate *cert)  { -	struct public_key_signature *sig;  	struct crypto_shash *tfm;  	struct shash_desc *desc;  	size_t digest_size, desc_size; +	void *digest;  	int ret;  	pr_devel("==>%s()\n", __func__); -	 + +	if (cert->sig.rsa.s) +		return 0; + +	cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size); +	if (!cert->sig.rsa.s) +		return -ENOMEM; +	cert->sig.nr_mpi = 1; +  	/* Allocate the hashing algorithm we're going to need and find out how  	 * big the hash operational data will be.  	 */ -	tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0); +	tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);  	if (IS_ERR(tfm))  		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);  	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);  	digest_size = crypto_shash_digestsize(tfm); -	/* We allocate the hash operational data storage on the end of our -	 * context data. +	/* We allocate the hash operational data storage on the end of the +	 * digest storage space.  	 
*/  	ret = -ENOMEM; -	sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL); -	if (!sig) -		goto error_no_sig; +	digest = kzalloc(digest_size + desc_size, GFP_KERNEL); +	if (!digest) +		goto error; -	sig->pkey_hash_algo	= cert->sig_hash_algo; -	sig->digest		= (u8 *)sig + sizeof(*sig) + desc_size; -	sig->digest_size	= digest_size; +	cert->sig.digest = digest; +	cert->sig.digest_size = digest_size; -	desc = (void *)sig + sizeof(*sig); -	desc->tfm	= tfm; -	desc->flags	= CRYPTO_TFM_REQ_MAY_SLEEP; +	desc = digest + digest_size; +	desc->tfm = tfm; +	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  	ret = crypto_shash_init(desc);  	if (ret < 0)  		goto error; +	might_sleep(); +	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest); +error: +	crypto_free_shash(tfm); +	pr_devel("<==%s() = %d\n", __func__, ret); +	return ret; +} +EXPORT_SYMBOL_GPL(x509_get_sig_params); -	ret = -ENOMEM; -	sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size); -	if (!sig->rsa.s) -		goto error; +/* + * Check the signature on a certificate using the provided public key + */ +int x509_check_signature(const struct public_key *pub, +			 struct x509_certificate *cert) +{ +	int ret; -	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest); -	if (ret < 0) -		goto error_mpi; +	pr_devel("==>%s()\n", __func__); -	ret = pub->algo->verify_signature(pub, sig); +	ret = x509_get_sig_params(cert); +	if (ret < 0) +		return ret; +	ret = public_key_verify_signature(pub, &cert->sig);  	pr_debug("Cert Verification: %d\n", ret); - -error_mpi: -	mpi_free(sig->rsa.s); -error: -	kfree(sig); -error_no_sig: -	crypto_free_shash(tfm); - -	pr_devel("<==%s() = %d\n", __func__, ret);  	return ret;  } +EXPORT_SYMBOL_GPL(x509_check_signature);  /*   * Attempt to parse a data blob for a key as an X509 certificate. 
@@ -106,7 +108,6 @@ error_no_sig:  static int x509_key_preparse(struct key_preparsed_payload *prep)  {  	struct x509_certificate *cert; -	struct tm now;  	size_t srlen, sulen;  	char *desc = NULL;  	int ret; @@ -117,7 +118,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)  	pr_devel("Cert Issuer: %s\n", cert->issuer);  	pr_devel("Cert Subject: %s\n", cert->subject); -	pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]); + +	if (cert->pub->pkey_algo >= PKEY_ALGO__LAST || +	    cert->sig.pkey_algo >= PKEY_ALGO__LAST || +	    cert->sig.pkey_hash_algo >= PKEY_HASH__LAST || +	    !pkey_algo[cert->pub->pkey_algo] || +	    !pkey_algo[cert->sig.pkey_algo] || +	    !hash_algo_name[cert->sig.pkey_hash_algo]) { +		ret = -ENOPKG; +		goto error_free_cert; +	} + +	pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);  	pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",  		 cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,  		 cert->valid_from.tm_mday, cert->valid_from.tm_hour, @@ -127,58 +139,22 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)  		 cert->valid_to.tm_mday, cert->valid_to.tm_hour,  		 cert->valid_to.tm_min,  cert->valid_to.tm_sec);  	pr_devel("Cert Signature: %s + %s\n", -		 pkey_algo[cert->sig_pkey_algo], -		 pkey_hash_algo[cert->sig_hash_algo]); +		 pkey_algo_name[cert->sig.pkey_algo], +		 hash_algo_name[cert->sig.pkey_hash_algo]); -	if (!cert->fingerprint || !cert->authority) { -		pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n", +	if (!cert->fingerprint) { +		pr_warn("Cert for '%s' must have a SubjKeyId extension\n",  			cert->subject);  		ret = -EKEYREJECTED;  		goto error_free_cert;  	} -	time_to_tm(CURRENT_TIME.tv_sec, 0, &now); -	pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n", -		 now.tm_year + 1900, now.tm_mon + 1, now.tm_mday, -		 now.tm_hour, now.tm_min,  now.tm_sec); -	if (now.tm_year < cert->valid_from.tm_year || -	    (now.tm_year == cert->valid_from.tm_year && -	     (now.tm_mon < cert->valid_from.tm_mon || -	      (now.tm_mon == cert->valid_from.tm_mon && -	       (now.tm_mday < cert->valid_from.tm_mday || -		(now.tm_mday == cert->valid_from.tm_mday && -		 (now.tm_hour < cert->valid_from.tm_hour || -		  (now.tm_hour == cert->valid_from.tm_hour && -		   (now.tm_min < cert->valid_from.tm_min || -		    (now.tm_min == cert->valid_from.tm_min && -		     (now.tm_sec < cert->valid_from.tm_sec -		      ))))))))))) { -		pr_warn("Cert %s is not yet valid\n", cert->fingerprint); -		ret = -EKEYREJECTED; -		goto error_free_cert; -	} -	if (now.tm_year > cert->valid_to.tm_year || -	    (now.tm_year == cert->valid_to.tm_year && -	     (now.tm_mon > cert->valid_to.tm_mon || -	      (now.tm_mon == cert->valid_to.tm_mon && -	       (now.tm_mday > cert->valid_to.tm_mday || -		(now.tm_mday == cert->valid_to.tm_mday && -		 (now.tm_hour > cert->valid_to.tm_hour || -		  (now.tm_hour == cert->valid_to.tm_hour && -		   (now.tm_min > cert->valid_to.tm_min || -		    (now.tm_min == cert->valid_to.tm_min && -		     (now.tm_sec > cert->valid_to.tm_sec -		      ))))))))))) { -		pr_warn("Cert %s has expired\n", cert->fingerprint); -		ret = -EKEYEXPIRED; -		goto error_free_cert; -	} - -	cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo]; +	cert->pub->algo = pkey_algo[cert->pub->pkey_algo];  	cert->pub->id_type = PKEY_ID_X509; -	/* Check the signature on the key */ -	if (strcmp(cert->fingerprint, cert->authority) == 0) { +	/* Check the signature on the key if it appears to be self-signed 
*/ +	if (!cert->authority || +	    strcmp(cert->fingerprint, cert->authority) == 0) {  		ret = x509_check_signature(cert->pub, cert);  		if (ret < 0)  			goto error_free_cert; @@ -237,3 +213,6 @@ static void __exit x509_key_exit(void)  module_init(x509_key_init);  module_exit(x509_key_exit); + +MODULE_DESCRIPTION("X.509 certificate parser"); +MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 9e62feffb37..f8c0b8dbeb7 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,  						      &dest, 1, &src, 1, len);  	struct dma_device *device = chan ? chan->device : NULL;  	struct dma_async_tx_descriptor *tx = NULL; +	struct dmaengine_unmap_data *unmap = NULL; -	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { -		dma_addr_t dma_dest, dma_src; +	if (device) +		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); + +	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {  		unsigned long dma_prep_flags = 0;  		if (submit->cb_fn)  			dma_prep_flags |= DMA_PREP_INTERRUPT;  		if (submit->flags & ASYNC_TX_FENCE)  			dma_prep_flags |= DMA_PREP_FENCE; -		dma_dest = dma_map_page(device->dev, dest, dest_offset, len, -					DMA_FROM_DEVICE); - -		dma_src = dma_map_page(device->dev, src, src_offset, len, -				       DMA_TO_DEVICE); - -		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, -						    len, dma_prep_flags); -		if (!tx) { -			dma_unmap_page(device->dev, dma_dest, len, -				       DMA_FROM_DEVICE); -			dma_unmap_page(device->dev, dma_src, len, -				       DMA_TO_DEVICE); -		} + +		unmap->to_cnt = 1; +		unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, +					      DMA_TO_DEVICE); +		unmap->from_cnt = 1; +		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, +					      DMA_FROM_DEVICE); +		unmap->len = len; + +		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], +						    unmap->addr[0], len, +						    dma_prep_flags);  	}  	if (tx) {  		pr_debug("%s: (async) len: %zu\n", __func__, len); + +		dma_set_unmap(tx, unmap);  		async_tx_submit(chan, tx, submit);  	} else {  		void *dest_buf, *src_buf; @@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,  		async_tx_sync_epilog(submit);  	} +	dmaengine_unmap_put(unmap); +  	return tx;  }  EXPORT_SYMBOL_GPL(async_memcpy); diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 91d5d385899..d05327caf69 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -46,49 +46,24 @@ static struct page *pq_scribble_page;   * do_async_gen_syndrome - asynchronously calculate P and/or Q   */  static __async_inline struct dma_async_tx_descriptor * -do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, -		      const unsigned char *scfs, unsigned int offset, int disks, -		      size_t len, dma_addr_t *dma_src, +do_async_gen_syndrome(struct dma_chan *chan, +		      const unsigned char *scfs, int disks, +		      struct dmaengine_unmap_data *unmap, +		      enum dma_ctrl_flags dma_flags,  		      struct async_submit_ctl *submit)  {  	struct dma_async_tx_descriptor *tx = NULL;  	struct dma_device *dma = chan->device; -	enum dma_ctrl_flags dma_flags = 0;  	enum async_tx_flags flags_orig = submit->flags;  	dma_async_tx_callback cb_fn_orig = submit->cb_fn;  	dma_async_tx_callback cb_param_orig = submit->cb_param;  	
int src_cnt = disks - 2; -	unsigned char coefs[src_cnt];  	unsigned short pq_src_cnt;  	dma_addr_t dma_dest[2];  	int src_off = 0; -	int idx; -	int i; -	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ -	if (P(blocks, disks)) -		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, -					   len, DMA_BIDIRECTIONAL); -	else -		dma_flags |= DMA_PREP_PQ_DISABLE_P; -	if (Q(blocks, disks)) -		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, -					   len, DMA_BIDIRECTIONAL); -	else -		dma_flags |= DMA_PREP_PQ_DISABLE_Q; - -	/* convert source addresses being careful to collapse 'empty' -	 * sources and update the coefficients accordingly -	 */ -	for (i = 0, idx = 0; i < src_cnt; i++) { -		if (blocks[i] == NULL) -			continue; -		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, -					    DMA_TO_DEVICE); -		coefs[idx] = scfs[i]; -		idx++; -	} -	src_cnt = idx; +	if (submit->flags & ASYNC_TX_FENCE) +		dma_flags |= DMA_PREP_FENCE;  	while (src_cnt > 0) {  		submit->flags = flags_orig; @@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,  		if (src_cnt > pq_src_cnt) {  			submit->flags &= ~ASYNC_TX_ACK;  			submit->flags |= ASYNC_TX_FENCE; -			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;  			submit->cb_fn = NULL;  			submit->cb_param = NULL;  		} else { -			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;  			submit->cb_fn = cb_fn_orig;  			submit->cb_param = cb_param_orig;  			if (cb_fn_orig)  				dma_flags |= DMA_PREP_INTERRUPT;  		} -		if (submit->flags & ASYNC_TX_FENCE) -			dma_flags |= DMA_PREP_FENCE; -		/* Since we have clobbered the src_list we are committed -		 * to doing this asynchronously.  Drivers force forward -		 * progress in case they can not provide a descriptor +		/* Drivers force forward progress in case they can not provide +		 * a descriptor  		 */  		for (;;) { +			dma_dest[0] = unmap->addr[disks - 2]; +			dma_dest[1] = unmap->addr[disks - 1];  			tx = dma->device_prep_dma_pq(chan, dma_dest, -						     &dma_src[src_off], +						     &unmap->addr[src_off],  						     pq_src_cnt, -						     &coefs[src_off], len, +						     &scfs[src_off], unmap->len,  						     dma_flags);  			if (likely(tx))  				break; @@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,  			dma_async_issue_pending(chan);  		} +		dma_set_unmap(tx, unmap);  		async_tx_submit(chan, tx, submit);  		submit->depend_tx = tx; @@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,   * set to NULL those buffers will be replaced with the raid6_zero_page   * in the synchronous path and omitted in the hardware-asynchronous   * path. - * - * 'blocks' note: if submit->scribble is NULL then the contents of - * 'blocks' may be overwritten to perform address conversions - * (dma_map_page() or page_address()).   */  struct dma_async_tx_descriptor *  async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, @@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,  						      &P(blocks, disks), 2,  						      blocks, src_cnt, len);  	struct dma_device *device = chan ? 
chan->device : NULL; -	dma_addr_t *dma_src = NULL; +	struct dmaengine_unmap_data *unmap = NULL;  	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); -	if (submit->scribble) -		dma_src = submit->scribble; -	else if (sizeof(dma_addr_t) <= sizeof(struct page *)) -		dma_src = (dma_addr_t *) blocks; +	if (device) +		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); -	if (dma_src && device && +	if (unmap &&  	    (src_cnt <= dma_maxpq(device, 0) ||  	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&  	    is_dma_pq_aligned(device, offset, 0, len)) { +		struct dma_async_tx_descriptor *tx; +		enum dma_ctrl_flags dma_flags = 0; +		unsigned char coefs[src_cnt]; +		int i, j; +  		/* run the p+q asynchronously */  		pr_debug("%s: (async) disks: %d len: %zu\n",  			 __func__, disks, len); -		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, -					     disks, len, dma_src, submit); + +		/* convert source addresses being careful to collapse 'empty' +		 * sources and update the coefficients accordingly +		 */ +		unmap->len = len; +		for (i = 0, j = 0; i < src_cnt; i++) { +			if (blocks[i] == NULL) +				continue; +			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, +						      len, DMA_TO_DEVICE); +			coefs[j] = raid6_gfexp[i]; +			unmap->to_cnt++; +			j++; +		} + +		/* +		 * DMAs use destinations as sources, +		 * so use BIDIRECTIONAL mapping +		 */ +		unmap->bidi_cnt++; +		if (P(blocks, disks)) +			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), +							offset, len, DMA_BIDIRECTIONAL); +		else { +			unmap->addr[j++] = 0; +			dma_flags |= DMA_PREP_PQ_DISABLE_P; +		} + +		unmap->bidi_cnt++; +		if (Q(blocks, disks)) +			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), +						       offset, len, DMA_BIDIRECTIONAL); +		else { +			unmap->addr[j++] = 0; +			dma_flags |= DMA_PREP_PQ_DISABLE_Q; +		} + +		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); +		dmaengine_unmap_put(unmap); +		return tx;  	} +	dmaengine_unmap_put(unmap); +  	/* run the pq synchronously */  	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); @@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,  	struct dma_async_tx_descriptor *tx;  	unsigned char coefs[disks-2];  	enum dma_ctrl_flags dma_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; -	dma_addr_t *dma_src = NULL; -	int src_cnt = 0; +	struct dmaengine_unmap_data *unmap = NULL;  	BUG_ON(disks < 4); -	if (submit->scribble) -		dma_src = submit->scribble; -	else if (sizeof(dma_addr_t) <= sizeof(struct page *)) -		dma_src = (dma_addr_t *) blocks; +	if (device) +		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); -	if (dma_src && device && disks <= dma_maxpq(device, 0) && +	if (unmap && disks <= dma_maxpq(device, 0) &&  	    is_dma_pq_aligned(device, offset, 0, len)) {  		struct device *dev = device->dev; -		dma_addr_t *pq = &dma_src[disks-2]; -		int i; +		dma_addr_t pq[2]; +		int i, j = 0, src_cnt = 0;  		pr_debug("%s: (async) disks: %d len: %zu\n",  			 __func__, disks, len); -		if (!P(blocks, disks)) + +		unmap->len = len; +		for (i = 0; i < disks-2; i++) +			if (likely(blocks[i])) { +				unmap->addr[j] = dma_map_page(dev, blocks[i], +							      offset, len, +							      DMA_TO_DEVICE); +				coefs[j] = raid6_gfexp[i]; +				unmap->to_cnt++; +				src_cnt++; +				j++; +			} + +		if (!P(blocks, disks)) { +			pq[0] = 0;  			dma_flags |= DMA_PREP_PQ_DISABLE_P; -		else +		} else {  			pq[0] = dma_map_page(dev, P(blocks, disks),  					     offset, len,  					     DMA_TO_DEVICE); -		if (!Q(blocks, disks)) +			unmap->addr[j++] = pq[0]; +			unmap->to_cnt++; +		} +		if (!Q(blocks, disks)) { +			pq[1] = 0;  			dma_flags |= DMA_PREP_PQ_DISABLE_Q; -		else +		} else {  			pq[1] = dma_map_page(dev, Q(blocks, disks),  					     offset, len,  					     DMA_TO_DEVICE); +			unmap->addr[j++] = pq[1]; +			unmap->to_cnt++; +		}  		if (submit->flags & ASYNC_TX_FENCE)  			dma_flags |= DMA_PREP_FENCE; -		for (i = 0; i < disks-2; i++) -			if (likely(blocks[i])) { -				dma_src[src_cnt] = dma_map_page(dev, blocks[i], -								offset, len, -								DMA_TO_DEVICE); -				coefs[src_cnt] = raid6_gfexp[i]; -				src_cnt++; -			} -  		for (;;) { -			tx = device->device_prep_dma_pq_val(chan, pq, dma_src, +			tx = device->device_prep_dma_pq_val(chan, pq, +							    unmap->addr,  							    src_cnt,  							    coefs,  							    len, pqres, @@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,  			async_tx_quiesce(&submit->depend_tx);  			dma_async_issue_pending(chan);  		} + +		dma_set_unmap(tx, unmap);  		async_tx_submit(chan, tx, submit);  		return tx; diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a9f08a6a582..934a8498149 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -26,6 +26,7 @@  #include <linux/dma-mapping.h>  #include <linux/raid/pq.h>  #include <linux/async_tx.h> +#include <linux/dmaengine.h>  static struct dma_async_tx_descriptor *  async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, @@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,  	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,  						      &dest, 1, srcs, 2, len);  	struct dma_device *dma = chan ? 
chan->device : NULL; +	struct dmaengine_unmap_data *unmap = NULL;  	const u8 *amul, *bmul;  	u8 ax, bx;  	u8 *a, *b, *c; -	if (dma) { -		dma_addr_t dma_dest[2]; -		dma_addr_t dma_src[2]; +	if (dma) +		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + +	if (unmap) {  		struct device *dev = dma->dev; +		dma_addr_t pq[2];  		struct dma_async_tx_descriptor *tx;  		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;  		if (submit->flags & ASYNC_TX_FENCE)  			dma_flags |= DMA_PREP_FENCE; -		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); -		dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); -		dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); -		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, +		unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); +		unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); +		unmap->to_cnt = 2; + +		unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); +		unmap->bidi_cnt = 1; +		/* engine only looks at Q, but expects it to follow P */ +		pq[1] = unmap->addr[2]; + +		unmap->len = len; +		tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,  					     len, dma_flags);  		if (tx) { +			dma_set_unmap(tx, unmap);  			async_tx_submit(chan, tx, submit); +			dmaengine_unmap_put(unmap);  			return tx;  		}  		/* could not get a descriptor, unmap and fall through to  		 * the synchronous path  		 */ -		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); -		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); -		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); +		dmaengine_unmap_put(unmap);  	}  	/* run the operation synchronously */ @@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,  	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,  						      &dest, 1, &src, 1, len);  	struct dma_device *dma = chan ? 
chan->device : NULL; +	struct dmaengine_unmap_data *unmap = NULL;  	const u8 *qmul; /* Q multiplier table */  	u8 *d, *s; -	if (dma) { +	if (dma) +		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + +	if (unmap) {  		dma_addr_t dma_dest[2]; -		dma_addr_t dma_src[1];  		struct device *dev = dma->dev;  		struct dma_async_tx_descriptor *tx;  		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;  		if (submit->flags & ASYNC_TX_FENCE)  			dma_flags |= DMA_PREP_FENCE; -		dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); -		dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); -		tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, -					     len, dma_flags); +		unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); +		unmap->to_cnt++; +		unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); +		dma_dest[1] = unmap->addr[1]; +		unmap->bidi_cnt++; +		unmap->len = len; + +		/* this looks funny, but the engine looks for Q at +		 * dma_dest[1] and ignores dma_dest[0] as a dest +		 * due to DMA_PREP_PQ_DISABLE_P +		 */ +		tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, +					     1, &coef, len, dma_flags); +  		if (tx) { +			dma_set_unmap(tx, unmap); +			dmaengine_unmap_put(unmap);  			async_tx_submit(chan, tx, submit);  			return tx;  		} @@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,  		/* could not get a descriptor, unmap and fall through to  		 * the synchronous path  		 */ -		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); -		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); +		dmaengine_unmap_put(unmap);  	}  	/* no channel available, or failed to allocate a descriptor, so diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 7be34248b45..39ea4791a3c 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,  		}  		device->device_issue_pending(chan);  	} else { -		if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) +		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)  			panic("%s: DMA error waiting for depend_tx\n",  			      __func__);  		tx->tx_submit(tx); @@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)  		 * we are referring to the correct operation  		 */  		BUG_ON(async_tx_test_ack(*tx)); -		if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) +		if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)  			panic("%s: DMA error waiting for transaction\n",  			      __func__);  		async_tx_ack(*tx); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 8ade0a0481c..3c562f5a60b 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -33,48 +33,31 @@  /* do_async_xor - dma map the pages and perform the xor with an engine */  static __async_inline struct dma_async_tx_descriptor * -do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, -	     unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, +do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,  	     struct async_submit_ctl *submit)  {  	struct dma_device *dma = chan->device;  	struct dma_async_tx_descriptor *tx = NULL; -	int src_off = 0; -	int i;  	dma_async_tx_callback cb_fn_orig = submit->cb_fn;  	void *cb_param_orig = submit->cb_param;  	enum async_tx_flags flags_orig = submit->flags; -	enum dma_ctrl_flags dma_flags; -	int xor_src_cnt = 0; -	dma_addr_t dma_dest; - -	
/* map the dest bidrectional in case it is re-used as a source */ -	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); -	for (i = 0; i < src_cnt; i++) { -		/* only map the dest once */ -		if (!src_list[i]) -			continue; -		if (unlikely(src_list[i] == dest)) { -			dma_src[xor_src_cnt++] = dma_dest; -			continue; -		} -		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, -						      len, DMA_TO_DEVICE); -	} -	src_cnt = xor_src_cnt; +	enum dma_ctrl_flags dma_flags = 0; +	int src_cnt = unmap->to_cnt; +	int xor_src_cnt; +	dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; +	dma_addr_t *src_list = unmap->addr;  	while (src_cnt) { +		dma_addr_t tmp; +  		submit->flags = flags_orig; -		dma_flags = 0;  		xor_src_cnt = min(src_cnt, (int)dma->max_xor); -		/* if we are submitting additional xors, leave the chain open, -		 * clear the callback parameters, and leave the destination -		 * buffer mapped +		/* if we are submitting additional xors, leave the chain open +		 * and clear the callback parameters  		 */  		if (src_cnt > xor_src_cnt) {  			submit->flags &= ~ASYNC_TX_ACK;  			submit->flags |= ASYNC_TX_FENCE; -			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;  			submit->cb_fn = NULL;  			submit->cb_param = NULL;  		} else { @@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,  			dma_flags |= DMA_PREP_INTERRUPT;  		if (submit->flags & ASYNC_TX_FENCE)  			dma_flags |= DMA_PREP_FENCE; -		/* Since we have clobbered the src_list we are committed -		 * to doing this asynchronously.  Drivers force forward progress -		 * in case they can not provide a descriptor + +		/* Drivers force forward progress in case they can not provide a +		 * descriptor  		 */ -		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], -					      xor_src_cnt, len, dma_flags); +		tmp = src_list[0]; +		if (src_list > unmap->addr) +			src_list[0] = dma_dest; +		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, +					      xor_src_cnt, unmap->len, +					      dma_flags); +		src_list[0] = tmp; +  		if (unlikely(!tx))  			async_tx_quiesce(&submit->depend_tx); @@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,  		while (unlikely(!tx)) {  			dma_async_issue_pending(chan);  			tx = dma->device_prep_dma_xor(chan, dma_dest, -						      &dma_src[src_off], -						      xor_src_cnt, len, +						      src_list, +						      xor_src_cnt, unmap->len,  						      dma_flags);  		} +		dma_set_unmap(tx, unmap);  		async_tx_submit(chan, tx, submit);  		submit->depend_tx = tx;  		if (src_cnt > xor_src_cnt) {  			/* drop completed sources */  			src_cnt -= xor_src_cnt; -			src_off += xor_src_cnt; -  			/* use the intermediate result a source */ -			dma_src[--src_off] = dma_dest;  			src_cnt++; +			src_list += xor_src_cnt - 1;  		} else  			break;  	} @@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,  	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,  						      &dest, 1, src_list,  						      src_cnt, len); -	dma_addr_t *dma_src = NULL; +	struct dma_device *device = chan ? 
chan->device : NULL; +	struct dmaengine_unmap_data *unmap = NULL;  	BUG_ON(src_cnt <= 1); -	if (submit->scribble) -		dma_src = submit->scribble; -	else if (sizeof(dma_addr_t) <= sizeof(struct page *)) -		dma_src = (dma_addr_t *) src_list; +	if (device) +		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); + +	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { +		struct dma_async_tx_descriptor *tx; +		int i, j; -	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {  		/* run the xor asynchronously */  		pr_debug("%s (async): len: %zu\n", __func__, len); -		return do_async_xor(chan, dest, src_list, offset, src_cnt, len, -				    dma_src, submit); +		unmap->len = len; +		for (i = 0, j = 0; i < src_cnt; i++) { +			if (!src_list[i]) +				continue; +			unmap->to_cnt++; +			unmap->addr[j++] = dma_map_page(device->dev, src_list[i], +							offset, len, DMA_TO_DEVICE); +		} + +		/* map it bidirectional as it may be re-used as a source */ +		unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, +					      DMA_BIDIRECTIONAL); +		unmap->bidi_cnt = 1; + +		tx = do_async_xor(chan, unmap, submit); +		dmaengine_unmap_put(unmap); +		return tx;  	} else { +		dmaengine_unmap_put(unmap);  		/* run the xor synchronously */  		pr_debug("%s (sync): len: %zu\n", __func__, len);  		WARN_ONCE(chan, "%s: no space for dma address conversion\n", @@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,  	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);  	struct dma_device *device = chan ? chan->device : NULL;  	struct dma_async_tx_descriptor *tx = NULL; -	dma_addr_t *dma_src = NULL; +	struct dmaengine_unmap_data *unmap = NULL;  	BUG_ON(src_cnt <= 1); -	if (submit->scribble) -		dma_src = submit->scribble; -	else if (sizeof(dma_addr_t) <= sizeof(struct page *)) -		dma_src = (dma_addr_t *) src_list; +	if (device) +		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); -	if (dma_src && device && src_cnt <= device->max_xor && +	if (unmap && src_cnt <= device->max_xor &&  	    is_dma_xor_aligned(device, offset, 0, len)) {  		unsigned long dma_prep_flags = 0;  		int i; @@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,  			dma_prep_flags |= DMA_PREP_INTERRUPT;  		if (submit->flags & ASYNC_TX_FENCE)  			dma_prep_flags |= DMA_PREP_FENCE; -		for (i = 0; i < src_cnt; i++) -			dma_src[i] = dma_map_page(device->dev, src_list[i], -						  offset, len, DMA_TO_DEVICE); -		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, +		for (i = 0; i < src_cnt; i++) { +			unmap->addr[i] = dma_map_page(device->dev, src_list[i], +						      offset, len, DMA_TO_DEVICE); +			unmap->to_cnt++; +		} +		unmap->len = len; + +		tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,  						     len, result,  						     dma_prep_flags);  		if (unlikely(!tx)) { @@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,  			while (!tx) {  				dma_async_issue_pending(chan);  				tx = device->device_prep_dma_xor_val(chan, -					dma_src, src_cnt, len, result, +					unmap->addr, src_cnt, len, result,  					dma_prep_flags);  			}  		} - +		dma_set_unmap(tx, unmap);  		async_tx_submit(chan, tx, submit);  	} else {  		enum async_tx_flags flags_orig = submit->flags; @@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,  		async_tx_sync_epilog(submit);  		submit->flags = 
flags_orig;  	} +	dmaengine_unmap_put(unmap);  	return tx;  } diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 4a92bac744d..dad95f45b88 100644 --- a/crypto/async_tx/raid6test.c +++ b/crypto/async_tx/raid6test.c @@ -28,7 +28,7 @@  #undef pr  #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) -#define NDISKS 16 /* Including P and Q */ +#define NDISKS 64 /* Including P and Q */  static struct page *dataptrs[NDISKS];  static addr_conv_t addr_conv[NDISKS]; @@ -219,6 +219,14 @@ static int raid6_test(void)  		err += test(11, &tests);  		err += test(12, &tests);  	} + +	/* the 24 disk case is special for ioatdma as it is the boundary point +	 * at which it needs to switch from 8-source ops to 16-source +	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) +	 */ +	if (NDISKS > 24) +		err += test(24, &tests); +  	err += test(NDISKS, &tests);  	pr("\n"); diff --git a/crypto/authenc.c b/crypto/authenc.c index ffce19de05c..e1223559d5d 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)  		aead_request_complete(req, err);  } -static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, -				 unsigned int keylen) +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, +			       unsigned int keylen)  { -	unsigned int authkeylen; -	unsigned int enckeylen; -	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); -	struct crypto_ahash *auth = ctx->auth; -	struct crypto_ablkcipher *enc = ctx->enc; -	struct rtattr *rta = (void *)key; +	struct rtattr *rta = (struct rtattr *)key;  	struct crypto_authenc_key_param *param; -	int err = -EINVAL;  	if (!RTA_OK(rta, keylen)) -		goto badkey; +		return -EINVAL;  	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) -		goto badkey; +		return -EINVAL;  	if (RTA_PAYLOAD(rta) < sizeof(*param)) -		goto badkey; +		return -EINVAL;  	param = RTA_DATA(rta); -	enckeylen = be32_to_cpu(param->enckeylen); +	keys->enckeylen = be32_to_cpu(param->enckeylen);  	key += RTA_ALIGN(rta->rta_len);  	keylen -= RTA_ALIGN(rta->rta_len); -	if (keylen < enckeylen) -		goto badkey; +	if (keylen < keys->enckeylen) +		return -EINVAL; -	authkeylen = keylen - enckeylen; +	keys->authkeylen = keylen - keys->enckeylen; +	keys->authkey = key; +	keys->enckey = key + keys->authkeylen; + +	return 0; +} +EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); + +static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, +				 unsigned int keylen) +{ +	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); +	struct crypto_ahash *auth = ctx->auth; +	struct crypto_ablkcipher *enc = ctx->enc; +	struct crypto_authenc_keys keys; +	int err = -EINVAL; + +	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) +		goto badkey;  crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);  crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &  				    CRYPTO_TFM_REQ_MASK); -	err = crypto_ahash_setkey(auth, key, authkeylen); +	err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);  	crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &  				       CRYPTO_TFM_RES_MASK); @@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,  	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);  	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &  					 CRYPTO_TFM_REQ_MASK); -	err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); +	err = 
crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);  	crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &  				       CRYPTO_TFM_RES_MASK); @@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; +	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;  	if (err)  		goto out; @@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; +	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;  	if (err)  		goto out; @@ -368,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,  	if (!err) {  		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);  		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); -		struct ablkcipher_request *abreq = aead_request_ctx(areq); -		u8 *iv = (u8 *)(abreq + 1) + -			 crypto_ablkcipher_reqsize(ctx->enc); +		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq); +		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +							    + ctx->reqoff); +		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);  		err = crypto_authenc_genicv(areq, iv, 0);  	} @@ -462,7 +475,7 @@ static int crypto_authenc_verify(struct aead_request *req,  	ihash = ohash + authsize;  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; +	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;  }  static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/authencesn.c b/crypto/authencesn.c index ab53762fc30..4be0dd4373a 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)  static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,  				     unsigned int keylen)  { -	unsigned int authkeylen; -	unsigned int enckeylen;  	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);  	struct crypto_ahash *auth = ctx->auth;  	struct crypto_ablkcipher *enc = ctx->enc; -	struct rtattr *rta = (void *)key; -	struct crypto_authenc_key_param *param; +	struct crypto_authenc_keys keys;  	int err = -EINVAL; -	if (!RTA_OK(rta, keylen)) +	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)  		goto badkey; -	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) -		goto badkey; -	if (RTA_PAYLOAD(rta) < sizeof(*param)) -		goto badkey; - -	param = RTA_DATA(rta); -	enckeylen = be32_to_cpu(param->enckeylen); - -	key += RTA_ALIGN(rta->rta_len); -	keylen -= RTA_ALIGN(rta->rta_len); - -	if (keylen < enckeylen) -		goto badkey; - -	authkeylen = keylen - enckeylen;  	crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);  	crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &  				     CRYPTO_TFM_REQ_MASK); -	err = crypto_ahash_setkey(auth, key, authkeylen); +	err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);  	crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &  					   CRYPTO_TFM_RES_MASK); @@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *  	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);  	
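Both authenc above and authenc_esn below now parse the combined key through the shared crypto_authenc_extractkeys() helper. For reference, a sketch of how a caller could pack such a key blob with the rtnetlink helpers the parser expects (the helper name and buffer handling are illustrative only; real callers such as the IPsec stack manage their own buffers):

/* hypothetical helper: pack authkey || enckey in the authenc setkey format;
 * keybuf needs RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen bytes
 */
static unsigned int pack_authenc_key(u8 *keybuf,
				     const u8 *authkey, unsigned int authkeylen,
				     const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)keybuf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);	/* big endian, as the parser expects */

	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}

crypto_authenc_extractkeys() walks this layout in reverse: it validates the rtattr header, reads enckeylen, and points keys->authkey and keys->enckey into the caller's buffer without copying.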
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &  					 CRYPTO_TFM_REQ_MASK); -	err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); +	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);  	crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &  					   CRYPTO_TFM_RES_MASK); @@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; +	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;  	if (err)  		goto out; @@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; +	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;  	if (err)  		goto out; @@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; +	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;  	if (err)  		goto out; @@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)  	ihash = ohash + authsize;  	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,  				 authsize, 0); -	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; +	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;  }  static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index a79e7e9ab86..0122bec3856 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -70,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)  	return max(start, end_page);  } -static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, -					       struct blkcipher_walk *walk, +static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,  					       unsigned int bsize)  {  	u8 *addr; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm); -	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); +	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);  	addr = blkcipher_get_spot(addr, bsize);  	scatterwalk_copychunks(addr, &walk->out, bsize, 1);  	return bsize; @@ -105,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,  int blkcipher_walk_done(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk, int err)  { -	struct crypto_blkcipher *tfm = desc->tfm;  	unsigned int nbytes = 0;  	if (likely(err >= 0)) { @@ -117,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,  			err = -EINVAL;  			goto err;  		} else -			n = blkcipher_done_slow(tfm, walk, n); +			n = blkcipher_done_slow(walk, n);  		nbytes = walk->total - n;  		err = 0; @@ -136,7 +133,7 @@ err:  	}  	if (walk->iv != desc->info) -		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); +		memcpy(desc->info, walk->iv, walk->ivsize);  	if (walk->buffer != walk->page)  		kfree(walk->buffer);  	if (walk->page) @@ -226,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc,  static int blkcipher_walk_next(struct blkcipher_desc *desc,  			       struct blkcipher_walk *walk)  { -	
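The deletions just below are the point of the blkcipher rework: blkcipher_walk_next() no longer reaches through desc->tfm for the block size, IV size and alignmask, but uses the copies cached in the walk by the walk_virt/walk_phys entry points. That decoupling is what enables the new blkcipher_aead_walk_virt_block() added at the bottom of this file, which lets an AEAD implementation drive the same walker with its own geometry. A rough sketch of the intended use, with illustrative names and an assumed 16-byte block size:

/* hypothetical AEAD bulk-data walk over req->src/req->dst */
static int sketch_aead_walk(struct crypto_aead *aead, struct aead_request *req)
{
	struct blkcipher_desc desc = {
		.info	= req->iv,	/* IV consumed by the walk */
		.flags	= req->base.flags,
	};
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, req->dst, req->src, req->cryptlen);
	/* block size, IV size and alignmask come from the aead tfm, not desc.tfm */
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, 16);

	while (walk.nbytes) {
		/* transform walk.src.virt.addr into walk.dst.virt.addr here */
		err = blkcipher_walk_done(&desc, &walk, 0);
	}

	return err;
}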
struct crypto_blkcipher *tfm = desc->tfm; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);  	unsigned int bsize;  	unsigned int n;  	int err;  	n = walk->total; -	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { +	if (unlikely(n < walk->cipher_blocksize)) {  		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;  		return blkcipher_walk_done(desc, walk, -EINVAL);  	}  	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |  			 BLKCIPHER_WALK_DIFF); -	if (!scatterwalk_aligned(&walk->in, alignmask) || -	    !scatterwalk_aligned(&walk->out, alignmask)) { +	if (!scatterwalk_aligned(&walk->in, walk->alignmask) || +	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {  		walk->flags |= BLKCIPHER_WALK_COPY;  		if (!walk->page) {  			walk->page = (void *)__get_free_page(GFP_ATOMIC); @@ -250,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,  		}  	} -	bsize = min(walk->blocksize, n); +	bsize = min(walk->walk_blocksize, n);  	n = scatterwalk_clamp(&walk->in, n);  	n = scatterwalk_clamp(&walk->out, n);  	if (unlikely(n < bsize)) { -		err = blkcipher_next_slow(desc, walk, bsize, alignmask); +		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);  		goto set_phys_lowmem;  	} @@ -277,28 +272,26 @@ set_phys_lowmem:  	return err;  } -static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, -				    struct crypto_blkcipher *tfm, -				    unsigned int alignmask) +static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)  { -	unsigned bs = walk->blocksize; -	unsigned int ivsize = crypto_blkcipher_ivsize(tfm); -	unsigned aligned_bs = ALIGN(bs, alignmask + 1); -	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - -			    (alignmask + 1); +	unsigned bs = walk->walk_blocksize; +	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1); +	unsigned int size = aligned_bs * 2 + +			    walk->ivsize + max(aligned_bs, walk->ivsize) - +			    (walk->alignmask + 1);  	u8 *iv; -	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); +	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);  	walk->buffer = kmalloc(size, GFP_ATOMIC);  	if (!walk->buffer)  		return -ENOMEM; -	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); +	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);  	iv = blkcipher_get_spot(iv, bs) + aligned_bs;  	iv = blkcipher_get_spot(iv, bs) + aligned_bs; -	iv = blkcipher_get_spot(iv, ivsize); +	iv = blkcipher_get_spot(iv, walk->ivsize); -	walk->iv = memcpy(iv, walk->iv, ivsize); +	walk->iv = memcpy(iv, walk->iv, walk->ivsize);  	return 0;  } @@ -306,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk)  {  	walk->flags &= ~BLKCIPHER_WALK_PHYS; -	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->cipher_blocksize = walk->walk_blocksize; +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_virt); @@ -315,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk)  {  	walk->flags |= BLKCIPHER_WALK_PHYS; -	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->cipher_blocksize = walk->walk_blocksize; +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return 
blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_phys); @@ -323,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);  static int blkcipher_walk_first(struct blkcipher_desc *desc,  				struct blkcipher_walk *walk)  { -	struct crypto_blkcipher *tfm = desc->tfm; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm); -  	if (WARN_ON_ONCE(in_irq()))  		return -EDEADLK; @@ -335,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,  	walk->buffer = NULL;  	walk->iv = desc->info; -	if (unlikely(((unsigned long)walk->iv & alignmask))) { -		int err = blkcipher_copy_iv(walk, tfm, alignmask); +	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { +		int err = blkcipher_copy_iv(walk);  		if (err)  			return err;  	} @@ -353,11 +349,28 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,  			      unsigned int blocksize)  {  	walk->flags &= ~BLKCIPHER_WALK_PHYS; -	walk->blocksize = blocksize; +	walk->walk_blocksize = blocksize; +	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, +				   struct blkcipher_walk *walk, +				   struct crypto_aead *tfm, +				   unsigned int blocksize) +{ +	walk->flags &= ~BLKCIPHER_WALK_PHYS; +	walk->walk_blocksize = blocksize; +	walk->cipher_blocksize = crypto_aead_blocksize(tfm); +	walk->ivsize = crypto_aead_ivsize(tfm); +	walk->alignmask = crypto_aead_alignmask(tfm); +	return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block); +  static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,  			    unsigned int keylen)  { diff --git a/crypto/ccm.c b/crypto/ccm.c index 499c91717d9..1df84217f7c 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,  	}  	/* compute plaintext into mac */ -	get_data_to_compute(cipher, pctx, plain, cryptlen); +	if (cryptlen) +		get_data_to_compute(cipher, pctx, plain, cryptlen);  out:  	return err; @@ -363,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,  	if (!err) {  		err = crypto_ccm_auth(req, req->dst, cryptlen); -		if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) +		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))  			err = -EBADMSG;  	}  	aead_request_complete(req, err); @@ -422,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)  		return err;  	/* verify */ -	if (memcmp(authtag, odata, authsize)) +	if (crypto_memneq(authtag, odata, authsize))  		return -EBADMSG;  	return err; diff --git a/crypto/chainiv.c b/crypto/chainiv.c index 834d8dd3d4f..9c294c8f9a0 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c @@ -126,7 +126,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)  	int err = ctx->err;  	if (!ctx->queue.qlen) { -		smp_mb__before_clear_bit(); +		smp_mb__before_atomic();  		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);  		if (!ctx->queue.qlen || diff --git a/crypto/crc32c.c b/crypto/crc32c_generic.c index 06f7018c9d9..d9c7beba8e5 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c_generic.c @@ -170,3 +170,5 @@ module_exit(crc32c_mod_fini);  MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");  MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for 
lib/crc32c");  MODULE_LICENSE("GPL"); +MODULE_ALIAS("crc32c"); +MODULE_SOFTDEP("pre: crc32c"); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index fee7265cd35..1dc54bb95a8 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,6 +17,7 @@   *   */ +#include <crypto/null.h>  #include <crypto/internal/hash.h>  #include <crypto/internal/skcipher.h>  #include <linux/init.h> @@ -24,11 +25,6 @@  #include <linux/mm.h>  #include <linux/string.h> -#define NULL_KEY_SIZE		0 -#define NULL_BLOCK_SIZE		1 -#define NULL_DIGEST_SIZE	0 -#define NULL_IV_SIZE		0 -  static int null_compress(struct crypto_tfm *tfm, const u8 *src,  			 unsigned int slen, u8 *dst, unsigned int *dlen)  { diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 1512e41cd93..e2a34feec7a 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -265,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,  	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];  	LIST_HEAD(list); +	if (!netlink_capable(skb, CAP_NET_ADMIN)) +		return -EPERM; +  	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))  		return -EINVAL; @@ -295,6 +298,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,  	struct crypto_alg *alg;  	struct crypto_user_alg *p = nlmsg_data(nlh); +	if (!netlink_capable(skb, CAP_NET_ADMIN)) +		return -EPERM; +  	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))  		return -EINVAL; @@ -379,6 +385,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,  	struct crypto_user_alg *p = nlmsg_data(nlh);  	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; +	if (!netlink_capable(skb, CAP_NET_ADMIN)) +		return -EPERM; +  	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))  		return -EINVAL; @@ -466,9 +475,6 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)  	type -= CRYPTO_MSG_BASE;  	link = &crypto_dispatch[type]; -	if (!capable(CAP_NET_ADMIN)) -		return -EPERM; -  	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&  	    (nlh->nlmsg_flags & NLM_F_DUMP))) {  		struct crypto_alg *alg; diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c index adad92a44ba..2f1b8d12952 100644 --- a/crypto/crypto_wq.c +++ b/crypto/crypto_wq.c @@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void)  	destroy_workqueue(kcrypto_wq);  } -module_init(crypto_wq_init); +subsys_initcall(crypto_wq_init);  module_exit(crypto_wq_exit);  MODULE_LICENSE("GPL"); diff --git a/crypto/gcm.c b/crypto/gcm.c index 43e1fb05ea5..b4f01793900 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,  	crypto_xor(auth_tag, iauth_tag, 16);  	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); -	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; +	return crypto_memneq(iauth_tag, auth_tag, authsize) ? 
-EBADMSG : 0;  }  static void gcm_decrypt_done(struct crypto_async_request *areq, int err) diff --git a/crypto/hash_info.c b/crypto/hash_info.c new file mode 100644 index 00000000000..3e7ff46f26e --- /dev/null +++ b/crypto/hash_info.c @@ -0,0 +1,56 @@ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include <linux/export.h> +#include <crypto/hash_info.h> + +const char *const hash_algo_name[HASH_ALGO__LAST] = { +	[HASH_ALGO_MD4]		= "md4", +	[HASH_ALGO_MD5]		= "md5", +	[HASH_ALGO_SHA1]	= "sha1", +	[HASH_ALGO_RIPE_MD_160]	= "rmd160", +	[HASH_ALGO_SHA256]	= "sha256", +	[HASH_ALGO_SHA384]	= "sha384", +	[HASH_ALGO_SHA512]	= "sha512", +	[HASH_ALGO_SHA224]	= "sha224", +	[HASH_ALGO_RIPE_MD_128]	= "rmd128", +	[HASH_ALGO_RIPE_MD_256]	= "rmd256", +	[HASH_ALGO_RIPE_MD_320]	= "rmd320", +	[HASH_ALGO_WP_256]	= "wp256", +	[HASH_ALGO_WP_384]	= "wp384", +	[HASH_ALGO_WP_512]	= "wp512", +	[HASH_ALGO_TGR_128]	= "tgr128", +	[HASH_ALGO_TGR_160]	= "tgr160", +	[HASH_ALGO_TGR_192]	= "tgr192", +}; +EXPORT_SYMBOL_GPL(hash_algo_name); + +const int hash_digest_size[HASH_ALGO__LAST] = { +	[HASH_ALGO_MD4]		= MD5_DIGEST_SIZE, +	[HASH_ALGO_MD5]		= MD5_DIGEST_SIZE, +	[HASH_ALGO_SHA1]	= SHA1_DIGEST_SIZE, +	[HASH_ALGO_RIPE_MD_160]	= RMD160_DIGEST_SIZE, +	[HASH_ALGO_SHA256]	= SHA256_DIGEST_SIZE, +	[HASH_ALGO_SHA384]	= SHA384_DIGEST_SIZE, +	[HASH_ALGO_SHA512]	= SHA512_DIGEST_SIZE, +	[HASH_ALGO_SHA224]	= SHA224_DIGEST_SIZE, +	[HASH_ALGO_RIPE_MD_128]	= RMD128_DIGEST_SIZE, +	[HASH_ALGO_RIPE_MD_256]	= RMD256_DIGEST_SIZE, +	[HASH_ALGO_RIPE_MD_320]	= RMD320_DIGEST_SIZE, +	[HASH_ALGO_WP_256]	= WP256_DIGEST_SIZE, +	[HASH_ALGO_WP_384]	= WP384_DIGEST_SIZE, +	[HASH_ALGO_WP_512]	= WP512_DIGEST_SIZE, +	[HASH_ALGO_TGR_128]	= TGR128_DIGEST_SIZE, +	[HASH_ALGO_TGR_160]	= TGR160_DIGEST_SIZE, +	[HASH_ALGO_TGR_192]	= TGR192_DIGEST_SIZE, +}; +EXPORT_SYMBOL_GPL(hash_digest_size); diff --git a/crypto/memneq.c b/crypto/memneq.c new file mode 100644 index 00000000000..afed1bd16ae --- /dev/null +++ b/crypto/memneq.c @@ -0,0 +1,168 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + *   James Yonan <james@openvpn.net> + *   Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + *   * Redistributions of source code must retain the above copyright + *     notice, this list of conditions and the following disclaimer. + *   * Redistributions in binary form must reproduce the above copyright + *     notice, this list of conditions and the following disclaimer in + *     the documentation and/or other materials provided with the + *     distribution. + *   * Neither the name of OpenVPN Technologies nor the names of its + *     contributors may be used to endorse or promote products derived + *     from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <crypto/algapi.h> + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ +	unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) +	while (size >= sizeof(unsigned long)) { +		neq |= *(unsigned long *)a ^ *(unsigned long *)b; +		OPTIMIZER_HIDE_VAR(neq); +		a += sizeof(unsigned long); +		b += sizeof(unsigned long); +		size -= sizeof(unsigned long); +	} +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ +	while (size > 0) { +		neq |= *(unsigned char *)a ^ *(unsigned char *)b; +		OPTIMIZER_HIDE_VAR(neq); +		a += 1; +		b += 1; +		size -= 1; +	} +	return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ +	unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (sizeof(unsigned long) == 8) { +		neq |= *(unsigned long *)(a)   ^ *(unsigned long *)(b); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); +		OPTIMIZER_HIDE_VAR(neq); +	} else if (sizeof(unsigned int) == 4) { +		neq |= *(unsigned int *)(a)    ^ *(unsigned int *)(b); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); +		OPTIMIZER_HIDE_VAR(neq); +	} else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ +	{ +		neq |= *(unsigned char *)(a)    ^ *(unsigned char *)(b); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned 
char *)(a+3)  ^ *(unsigned char *)(b+3); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); +		OPTIMIZER_HIDE_VAR(neq); +		neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); +		OPTIMIZER_HIDE_VAR(neq); +	} + +	return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes.  Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. + */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, +				       size_t size) +{ +	switch (size) { +	case 16: +		return __crypto_memneq_16(a, b); +	default: +		return __crypto_memneq_generic(a, b, size); +	} +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index f8c920cafe6..309d345ead9 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -78,7 +78,7 @@ static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,  	cpu = *cb_cpu;  	rcu_read_lock_bh(); -	cpumask = rcu_dereference(pcrypt->cb_cpumask); +	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);  	if (cpumask_test_cpu(cpu, cpumask->mask))  			goto out; diff --git a/crypto/shash.c b/crypto/shash.c index 929058a6856..47c713954bf 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -67,7 +67,8 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey);  static inline unsigned int shash_align_buffer_size(unsigned len,  						   unsigned long mask)  { -	return len + (mask & ~(__alignof__(u8 __attribute__ ((aligned))) - 1)); +	typedef u8 __attribute__ ((aligned)) u8_aligned; +	return len + (mask & ~(__alignof__(u8_aligned) - 1));  }  static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 25a5934f0e5..ba247cf3085 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -137,7 +137,273 @@ out:  	return ret;  } +static int test_aead_jiffies(struct aead_request *req, int enc, +				int blen, int sec) +{ +	unsigned long start, end; +	int bcount; +	int ret; + +	for (start = jiffies, end = start + sec * HZ, bcount = 0; +	     time_before(jiffies, end); bcount++) { +		if (enc) +			ret = crypto_aead_encrypt(req); +		else +			ret = crypto_aead_decrypt(req); + +		if (ret) +			return ret; +	} + +	printk("%d operations in %d seconds (%ld bytes)\n", +	       bcount, sec, (long)bcount * blen); +	return 0; +} + +static int test_aead_cycles(struct aead_request *req, int enc, int blen) +{ +	unsigned long cycles = 0; +	int ret = 0; +	int i; + +	local_irq_disable(); + +	/* Warm-up run. 
*/ +	for (i = 0; i < 4; i++) { +		if (enc) +			ret = crypto_aead_encrypt(req); +		else +			ret = crypto_aead_decrypt(req); + +		if (ret) +			goto out; +	} + +	/* The real thing. */ +	for (i = 0; i < 8; i++) { +		cycles_t start, end; + +		start = get_cycles(); +		if (enc) +			ret = crypto_aead_encrypt(req); +		else +			ret = crypto_aead_decrypt(req); +		end = get_cycles(); + +		if (ret) +			goto out; + +		cycles += end - start; +	} + +out: +	local_irq_enable(); + +	if (ret == 0) +		printk("1 operation in %lu cycles (%d bytes)\n", +		       (cycles + 4) / 8, blen); + +	return ret; +} +  static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; +static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; + +#define XBUFSIZE 8 +#define MAX_IVLEN 32 + +static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +{ +	int i; + +	for (i = 0; i < XBUFSIZE; i++) { +		buf[i] = (void *)__get_free_page(GFP_KERNEL); +		if (!buf[i]) +			goto err_free_buf; +	} + +	return 0; + +err_free_buf: +	while (i-- > 0) +		free_page((unsigned long)buf[i]); + +	return -ENOMEM; +} + +static void testmgr_free_buf(char *buf[XBUFSIZE]) +{ +	int i; + +	for (i = 0; i < XBUFSIZE; i++) +		free_page((unsigned long)buf[i]); +} + +static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], +			unsigned int buflen) +{ +	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; +	int k, rem; + +	if (np > XBUFSIZE) { +		rem = PAGE_SIZE; +		np = XBUFSIZE; +	} else { +		rem = buflen % PAGE_SIZE ?: PAGE_SIZE; +	} +	sg_init_table(sg, np); +	for (k = 0; k < np; ++k) { +		if (k == (np-1)) +			sg_set_buf(&sg[k], xbuf[k], rem); +		else +			sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); +	} +} + +static void test_aead_speed(const char *algo, int enc, unsigned int sec, +			    struct aead_speed_template *template, +			    unsigned int tcount, u8 authsize, +			    unsigned int aad_size, u8 *keysize) +{ +	unsigned int i, j; +	struct crypto_aead *tfm; +	int ret = -ENOMEM; +	const char *key; +	struct aead_request *req; +	struct scatterlist *sg; +	struct scatterlist *asg; +	struct scatterlist *sgout; +	const char *e; +	void *assoc; +	char iv[MAX_IVLEN]; +	char *xbuf[XBUFSIZE]; +	char *xoutbuf[XBUFSIZE]; +	char *axbuf[XBUFSIZE]; +	unsigned int *b_size; +	unsigned int iv_len; + +	if (aad_size >= PAGE_SIZE) { +		pr_err("associated data length (%u) too big\n", aad_size); +		return; +	} + +	if (enc == ENCRYPT) +		e = "encryption"; +	else +		e = "decryption"; + +	if (testmgr_alloc_buf(xbuf)) +		goto out_noxbuf; +	if (testmgr_alloc_buf(axbuf)) +		goto out_noaxbuf; +	if (testmgr_alloc_buf(xoutbuf)) +		goto out_nooutbuf; + +	sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL); +	if (!sg) +		goto out_nosg; +	asg = &sg[8]; +	sgout = &asg[8]; + + +	printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e); + +	tfm = crypto_alloc_aead(algo, 0, 0); + +	if (IS_ERR(tfm)) { +		pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo, +		       PTR_ERR(tfm)); +		goto out_notfm; +	} + +	req = aead_request_alloc(tfm, GFP_KERNEL); +	if (!req) { +		pr_err("alg: aead: Failed to allocate request for %s\n", +		       algo); +		goto out_noreq; +	} + +	i = 0; +	do { +		b_size = aead_sizes; +		do { +			assoc = axbuf[0]; +			memset(assoc, 0xff, aad_size); +			sg_init_one(&asg[0], assoc, aad_size); + +			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { +				pr_err("template (%u) too big for tvmem (%lu)\n", +				       *keysize + *b_size, +					TVMEMSIZE * PAGE_SIZE); +				goto out; +			} + +			key = tvmem[0]; +			for (j = 0; j < tcount; j++) { +				if 
(template[j].klen == *keysize) { +					key = template[j].key; +					break; +				} +			} +			ret = crypto_aead_setkey(tfm, key, *keysize); +			if (!ret) +				ret = crypto_aead_setauthsize(tfm, authsize); + +			iv_len = crypto_aead_ivsize(tfm); +			if (iv_len) +				memset(&iv, 0xff, iv_len); + +			crypto_aead_clear_flags(tfm, ~0); +			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", +					i, *keysize * 8, *b_size); + + +			memset(tvmem[0], 0xff, PAGE_SIZE); + +			if (ret) { +				pr_err("setkey() failed flags=%x\n", +						crypto_aead_get_flags(tfm)); +				goto out; +			} + +			sg_init_aead(&sg[0], xbuf, +				    *b_size + (enc ? authsize : 0)); + +			sg_init_aead(&sgout[0], xoutbuf, +				    *b_size + (enc ? authsize : 0)); + +			aead_request_set_crypt(req, sg, sgout, *b_size, iv); +			aead_request_set_assoc(req, asg, aad_size); + +			if (sec) +				ret = test_aead_jiffies(req, enc, *b_size, sec); +			else +				ret = test_aead_cycles(req, enc, *b_size); + +			if (ret) { +				pr_err("%s() failed return code=%d\n", e, ret); +				break; +			} +			b_size++; +			i++; +		} while (*b_size); +		keysize++; +	} while (*keysize); + +out: +	aead_request_free(req); +out_noreq: +	crypto_free_aead(tfm); +out_notfm: +	kfree(sg); +out_nosg: +	testmgr_free_buf(xoutbuf); +out_nooutbuf: +	testmgr_free_buf(axbuf); +out_noaxbuf: +	testmgr_free_buf(xbuf); +out_noxbuf: +	return; +}  static void test_cipher_speed(const char *algo, int enc, unsigned int sec,  			      struct cipher_speed_template *template, @@ -493,7 +759,7 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)  		ret = wait_for_completion_interruptible(&tr->completion);  		if (!ret)  			ret = tr->err; -		INIT_COMPLETION(tr->completion); +		reinit_completion(&tr->completion);  	}  	return ret;  } @@ -721,7 +987,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)  		ret = wait_for_completion_interruptible(&tr->completion);  		if (!ret)  			ret = tr->err; -		INIT_COMPLETION(tr->completion); +		reinit_completion(&tr->completion);  	}  	return ret; @@ -1242,6 +1508,47 @@ static int do_test(int m)  		ret += tcrypt_test("cmac(des3_ede)");  		break; +	case 155: +		ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))"); +		break; + +	case 156: +		ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"); +		break; + +	case 157: +		ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"); +		break; +	case 181: +		ret += tcrypt_test("authenc(hmac(sha1),cbc(des))"); +		break; +	case 182: +		ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"); +		break; +	case 183: +		ret += tcrypt_test("authenc(hmac(sha224),cbc(des))"); +		break; +	case 184: +		ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"); +		break; +	case 185: +		ret += tcrypt_test("authenc(hmac(sha256),cbc(des))"); +		break; +	case 186: +		ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"); +		break; +	case 187: +		ret += tcrypt_test("authenc(hmac(sha384),cbc(des))"); +		break; +	case 188: +		ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"); +		break; +	case 189: +		ret += tcrypt_test("authenc(hmac(sha512),cbc(des))"); +		break; +	case 190: +		ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); +		break;  	case 200:  		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,  				speed_template_16_24_32); @@ -1423,6 +1730,11 @@  				  speed_template_32_64);  		break; +	case 211: +		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, +				NULL, 0, 16, 8, aead_speed_template_20); +		break; +  	case 300:  		/* 
fall through */ diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index ecdeeb1a7b0..6c7e21a09f7 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -22,6 +22,11 @@ struct cipher_speed_template {  	unsigned int klen;  }; +struct aead_speed_template { +	const char *key; +	unsigned int klen; +}; +  struct hash_speed {  	unsigned int blen;	/* buffer length */  	unsigned int plen;	/* per-update length */ @@ -58,6 +63,11 @@ static u8 speed_template_32_48_64[] = {32, 48, 64, 0};  static u8 speed_template_32_64[] = {32, 64, 0};  /* + * AEAD speed tests + */ +static u8 aead_speed_template_20[] = {20, 0}; + +/*   * Digest speed tests   */  static struct hash_speed generic_hash_speed_template[] = { diff --git a/crypto/testmgr.c b/crypto/testmgr.c index e091ef6e179..498649ac195 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -179,7 +179,7 @@ static int do_one_async_hash_op(struct ahash_request *req,  		ret = wait_for_completion_interruptible(&tr->completion);  		if (!ret)  			ret = tr->err; -		INIT_COMPLETION(tr->completion); +		reinit_completion(&tr->completion);  	}  	return ret;  } @@ -336,7 +336,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,  				ret = wait_for_completion_interruptible(  					&tresult.completion);  				if (!ret && !(ret = tresult.err)) { -					INIT_COMPLETION(tresult.completion); +					reinit_completion(&tresult.completion);  					break;  				}  				/* fall through */ @@ -414,16 +414,18 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  	void *input;  	void *output;  	void *assoc; -	char iv[MAX_IVLEN]; +	char *iv;  	char *xbuf[XBUFSIZE];  	char *xoutbuf[XBUFSIZE];  	char *axbuf[XBUFSIZE]; +	iv = kzalloc(MAX_IVLEN, GFP_KERNEL); +	if (!iv) +		return ret;  	if (testmgr_alloc_buf(xbuf))  		goto out_noxbuf;  	if (testmgr_alloc_buf(axbuf))  		goto out_noaxbuf; -  	if (diff_dst && testmgr_alloc_buf(xoutbuf))  		goto out_nooutbuf; @@ -503,16 +505,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  				goto out;  			} -			sg_init_one(&sg[0], input, -				    template[i].ilen + (enc ? authsize : 0)); -  			if (diff_dst) {  				output = xoutbuf[0];  				output += align_offset; +				sg_init_one(&sg[0], input, template[i].ilen);  				sg_init_one(&sgout[0], output, +					    template[i].rlen); +			} else { +				sg_init_one(&sg[0], input,  					    template[i].ilen +  						(enc ? 
authsize : 0)); -			} else {  				output = input;  			} @@ -543,7 +545,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  				ret = wait_for_completion_interruptible(  					&result.completion);  				if (!ret && !(ret = result.err)) { -					INIT_COMPLETION(result.completion); +					reinit_completion(&result.completion);  					break;  				}  			case -EBADMSG: @@ -612,12 +614,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  				memcpy(q, template[i].input + temp,  				       template[i].tap[k]); -				n = template[i].tap[k]; -				if (k == template[i].np - 1 && enc) -					n += authsize; -				if (offset_in_page(q) + n < PAGE_SIZE) -					q[n] = 0; -  				sg_set_buf(&sg[k], q, template[i].tap[k]);  				if (diff_dst) { @@ -625,13 +621,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  					    offset_in_page(IDX[k]);  					memset(q, 0, template[i].tap[k]); -					if (offset_in_page(q) + n < PAGE_SIZE) -						q[n] = 0;  					sg_set_buf(&sgout[k], q,  						   template[i].tap[k]);  				} +				n = template[i].tap[k]; +				if (k == template[i].np - 1 && enc) +					n += authsize; +				if (offset_in_page(q) + n < PAGE_SIZE) +					q[n] = 0; +  				temp += template[i].tap[k];  			} @@ -650,10 +650,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  					goto out;  				} -				sg[k - 1].length += authsize; -  				if (diff_dst)  					sgout[k - 1].length += authsize; +				else +					sg[k - 1].length += authsize;  			}  			sg_init_table(asg, template[i].anp); @@ -697,7 +697,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,  				ret = wait_for_completion_interruptible(  					&result.completion);  				if (!ret && !(ret = result.err)) { -					INIT_COMPLETION(result.completion); +					reinit_completion(&result.completion);  					break;  				}  			case -EBADMSG: @@ -769,6 +769,7 @@ out_nooutbuf:  out_noaxbuf:  	testmgr_free_buf(xbuf);  out_noxbuf: +	kfree(iv);  	return ret;  } @@ -983,7 +984,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,  				ret = wait_for_completion_interruptible(  					&result.completion);  				if (!ret && !((ret = result.err))) { -					INIT_COMPLETION(result.completion); +					reinit_completion(&result.completion);  					break;  				}  				/* fall through */ @@ -1086,7 +1087,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,  				ret = wait_for_completion_interruptible(  					&result.completion);  				if (!ret && !((ret = result.err))) { -					INIT_COMPLETION(result.completion); +					reinit_completion(&result.completion);  					break;  				}  				/* fall through */ @@ -1811,14 +1812,108 @@ static const struct alg_test_desc alg_test_descs[] = {  			}  		}  	}, { +		.alg = "authenc(hmac(md5),ecb(cipher_null))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = hmac_md5_ecb_cipher_null_enc_tv_template, +					.count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS +				}, +				.dec = { +					.vecs = hmac_md5_ecb_cipher_null_dec_tv_template, +					.count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS +				} +			} +		} +	}, {  		.alg = "authenc(hmac(sha1),cbc(aes))",  		.test = alg_test_aead,  		.fips_allowed = 1,  		.suite = {  			.aead = {  				.enc = { -					.vecs = hmac_sha1_aes_cbc_enc_tv_template, -					.count = HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS +					.vecs = +					hmac_sha1_aes_cbc_enc_tv_temp, +					.count = +					HMAC_SHA1_AES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha1),cbc(des))", +		.test = alg_test_aead, +		
.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha1_des_cbc_enc_tv_temp, +					.count = +					HMAC_SHA1_DES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha1),cbc(des3_ede))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha1_des3_ede_cbc_enc_tv_temp, +					.count = +					HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha1),ecb(cipher_null))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha1_ecb_cipher_null_enc_tv_temp, +					.count = +					HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC +				}, +				.dec = { +					.vecs = +					hmac_sha1_ecb_cipher_null_dec_tv_temp, +					.count = +					HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha224),cbc(des))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha224_des_cbc_enc_tv_temp, +					.count = +					HMAC_SHA224_DES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha224),cbc(des3_ede))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha224_des3_ede_cbc_enc_tv_temp, +					.count = +					HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC  				}  			}  		} @@ -1829,8 +1924,66 @@ static const struct alg_test_desc alg_test_descs[] = {  		.suite = {  			.aead = {  				.enc = { -					.vecs = hmac_sha256_aes_cbc_enc_tv_template, -					.count = HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS +					.vecs = +					hmac_sha256_aes_cbc_enc_tv_temp, +					.count = +					HMAC_SHA256_AES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha256),cbc(des))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha256_des_cbc_enc_tv_temp, +					.count = +					HMAC_SHA256_DES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha256),cbc(des3_ede))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha256_des3_ede_cbc_enc_tv_temp, +					.count = +					HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha384),cbc(des))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha384_des_cbc_enc_tv_temp, +					.count = +					HMAC_SHA384_DES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha384),cbc(des3_ede))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha384_des3_ede_cbc_enc_tv_temp, +					.count = +					HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC  				}  			}  		} @@ -1841,8 +1994,38 @@ static const struct alg_test_desc alg_test_descs[] = {  		.suite = {  			.aead = {  				.enc = { -					.vecs = hmac_sha512_aes_cbc_enc_tv_template, -					.count = HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS +					.vecs = +					hmac_sha512_aes_cbc_enc_tv_temp, +					.count = +					HMAC_SHA512_AES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha512),cbc(des))", +		.test = alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha512_des_cbc_enc_tv_temp, +					.count = +					HMAC_SHA512_DES_CBC_ENC_TEST_VEC +				} +			} +		} +	}, { +		.alg = "authenc(hmac(sha512),cbc(des3_ede))", +		.test = 
alg_test_aead, +		.fips_allowed = 1, +		.suite = { +			.aead = { +				.enc = { +					.vecs = +					hmac_sha512_des3_ede_cbc_enc_tv_temp, +					.count = +					HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC  				}  			}  		} @@ -3243,8 +3426,8 @@ test_done:  		panic("%s: %s alg self test failed in fips mode!\n", driver, alg);  	if (fips_enabled && !rc) -		printk(KERN_INFO "alg: self-tests for %s (%s) passed\n", -		       driver, alg); +		pr_info(KERN_INFO "alg: self-tests for %s (%s) passed\n", +			driver, alg);  	return rc; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 7d44aa3d6b4..69d0dd8ef27 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -487,10 +487,15 @@ static struct hash_testvec crct10dif_tv_template[] = {   * SHA1 test vectors  from from FIPS PUB 180-1   * Long vector from CAVS 5.0   */ -#define SHA1_TEST_VECTORS	3 +#define SHA1_TEST_VECTORS	6  static struct hash_testvec sha1_tv_template[] = {  	{ +		.plaintext = "", +		.psize	= 0, +		.digest	= "\xda\x39\xa3\xee\x5e\x6b\x4b\x0d\x32\x55" +			  "\xbf\xef\x95\x60\x18\x90\xaf\xd8\x07\x09", +	}, {  		.plaintext = "abc",  		.psize	= 3,  		.digest	= "\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e" @@ -529,6 +534,144 @@ static struct hash_testvec sha1_tv_template[] = {  			  "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17",  		.np	= 4,  		.tap	= { 63, 64, 31, 5 } +	}, { +		.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", +		.psize	= 64, +		.digest = "\xc8\x71\xf6\x9a\x63\xcc\xa9\x84\x84\x82" +			  "\x64\xe7\x79\x95\x5d\xd7\x19\x41\x7c\x91", +	}, { +		.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" +			     "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" +			     "\xec\x60\xf7\x8e\x02\x99\x30\xc7" +			     "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" +			     "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" +			     "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" +			     "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" +			     "\x03\x77\x0e\xa5\x19\xb0\x47\xde" +			     "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" +			     "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" +			     "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" +			     "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" +			     "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" +			     "\x69\x00\x97\x0b\xa2\x39\xd0\x44" +			     "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" +			     "\x4d\xe4\x58\xef\x86\x1d\x91\x28" +			     "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" +			     "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" +			     "\x80\x17\xae\x22\xb9\x50\xe7\x5b" +			     "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" +			     "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" +			     "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" +			     "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" +			     "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" +			     "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" +			     "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" +			     "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" +			     "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" +			     "\xae\x45\xdc\x50\xe7\x7e\x15\x89" +			     "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" +			     "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" +			     "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" +			     "\x53\xea\x81\x18\x8c\x23\xba\x2e" +			     "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" +			     "\x37\xce\x42\xd9\x70\x07\x7b\x12" +			     "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" +			     "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" +			     "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" +			     "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" +			     "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" +			     "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" +			     "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" +			     "\x81\x18\xaf\x23\xba\x51\xe8\x5c" +			     "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" +			     "\x65\xfc\x70\x07\x9e\x12\xa9\x40" +			     
"\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" +			     "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" +			     "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" +			     "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" +			     "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" +			     "\xee\x62\xf9\x90\x04\x9b\x32\xc9" +			     "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" +			     "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" +			     "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" +			     "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" +			     "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" +			     "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" +			     "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" +			     "\x38\xcf\x43\xda\x71\x08\x7c\x13" +			     "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" +			     "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" +			     "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" +			     "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" +			     "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" +			     "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" +			     "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" +			     "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" +			     "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" +			     "\x66\xfd\x71\x08\x9f\x13\xaa\x41" +			     "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" +			     "\x27\xbe\x55\xec\x60\xf7\x8e\x02" +			     "\x99\x30\xc7\x3b\xd2\x69\x00\x74" +			     "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" +			     "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" +			     "\xef\x63\xfa\x91\x05\x9c\x33\xca" +			     "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" +			     "\xb0\x47\xde\x52\xe9\x80\x17\x8b" +			     "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" +			     "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" +			     "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" +			     "\x55\xec\x83\x1a\x8e\x25\xbc\x30" +			     "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" +			     "\x39\xd0\x44\xdb\x72\x09\x7d\x14" +			     "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" +			     "\x1d\x91\x28\xbf\x33\xca\x61\xf8" +			     "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" +			     "\xde\x75\x0c\x80\x17\xae\x22\xb9" +			     "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" +			     "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" +			     "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" +			     "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" +			     "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" +			     "\x67\xfe\x72\x09\xa0\x14\xab\x42" +			     "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" +			     "\x28\xbf\x56\xed\x61\xf8\x8f\x03" +			     "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" +			     "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" +			     "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" +			     "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" +			     "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" +			     "\xb1\x48\xdf\x53\xea\x81\x18\x8c" +			     "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" +			     "\x95\x09\xa0\x37\xce\x42\xd9\x70" +			     "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" +			     "\x56\xed\x84\x1b\x8f\x26\xbd\x31" +			     "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" +			     "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" +			     "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" +			     "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" +			     "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" +			     "\xdf\x76\x0d\x81\x18\xaf\x23\xba" +			     "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" +			     "\xc3\x37\xce\x65\xfc\x70\x07\x9e" +			     "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" +			     "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" +			     "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" +			     "\x68\xff\x73\x0a\xa1\x15\xac\x43" +			     "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" +			     "\x29\xc0\x57\xee\x62\xf9\x90\x04" +			     "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" +			     "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" +			     "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" +			     "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" +			     "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" +			     "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" +			     "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" 
+			     "\x96\x0a\xa1\x38\xcf\x43\xda\x71" +			     "\x08\x7c\x13\xaa\x1e\xb5\x4c", +		.psize     = 1023, +		.digest    = "\xb8\xe3\x54\xed\xc5\xfc\xef\xa4" +			     "\x55\x73\x4a\x81\x99\xe4\x47\x2a" +			     "\x30\xd6\xc9\x85",  	}  }; @@ -536,10 +679,17 @@ static struct hash_testvec sha1_tv_template[] = {  /*   * SHA224 test vectors from from FIPS PUB 180-2   */ -#define SHA224_TEST_VECTORS     2 +#define SHA224_TEST_VECTORS     5  static struct hash_testvec sha224_tv_template[] = {  	{ +		.plaintext = "", +		.psize	= 0, +		.digest	= "\xd1\x4a\x02\x8c\x2a\x3a\x2b\xc9" +			  "\x47\x61\x02\xbb\x28\x82\x34\xc4" +			  "\x15\xa2\xb0\x1f\x82\x8e\xa6\x2a" +			  "\xc5\xb3\xe4\x2f", +	}, {  		.plaintext = "abc",  		.psize  = 3,  		.digest = "\x23\x09\x7D\x22\x34\x05\xD8\x22" @@ -556,16 +706,164 @@ static struct hash_testvec sha224_tv_template[] = {  			  "\x52\x52\x25\x25",  		.np     = 2,  		.tap    = { 28, 28 } +	}, { +		.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", +		.psize	= 64, +		.digest = "\xc4\xdb\x2b\x3a\x58\xc3\x99\x01" +			  "\x42\xfd\x10\x92\xaa\x4e\x04\x08" +			  "\x58\xbb\xbb\xe8\xf8\x14\xa7\x0c" +			  "\xef\x3b\xcb\x0e", +	}, { +		.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" +			     "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" +			     "\xec\x60\xf7\x8e\x02\x99\x30\xc7" +			     "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" +			     "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" +			     "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" +			     "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" +			     "\x03\x77\x0e\xa5\x19\xb0\x47\xde" +			     "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" +			     "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" +			     "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" +			     "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" +			     "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" +			     "\x69\x00\x97\x0b\xa2\x39\xd0\x44" +			     "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" +			     "\x4d\xe4\x58\xef\x86\x1d\x91\x28" +			     "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" +			     "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" +			     "\x80\x17\xae\x22\xb9\x50\xe7\x5b" +			     "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" +			     "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" +			     "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" +			     "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" +			     "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" +			     "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" +			     "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" +			     "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" +			     "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" +			     "\xae\x45\xdc\x50\xe7\x7e\x15\x89" +			     "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" +			     "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" +			     "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" +			     "\x53\xea\x81\x18\x8c\x23\xba\x2e" +			     "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" +			     "\x37\xce\x42\xd9\x70\x07\x7b\x12" +			     "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" +			     "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" +			     "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" +			     "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" +			     "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" +			     "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" +			     "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" +			     "\x81\x18\xaf\x23\xba\x51\xe8\x5c" +			     "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" +			     "\x65\xfc\x70\x07\x9e\x12\xa9\x40" +			     "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" +			     "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" +			     "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" +			     "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" +			     "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" +			     "\xee\x62\xf9\x90\x04\x9b\x32\xc9" +			     "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" +			     "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" +			     
"\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" +			     "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" +			     "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" +			     "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" +			     "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" +			     "\x38\xcf\x43\xda\x71\x08\x7c\x13" +			     "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" +			     "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" +			     "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" +			     "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" +			     "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" +			     "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" +			     "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" +			     "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" +			     "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" +			     "\x66\xfd\x71\x08\x9f\x13\xaa\x41" +			     "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" +			     "\x27\xbe\x55\xec\x60\xf7\x8e\x02" +			     "\x99\x30\xc7\x3b\xd2\x69\x00\x74" +			     "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" +			     "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" +			     "\xef\x63\xfa\x91\x05\x9c\x33\xca" +			     "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" +			     "\xb0\x47\xde\x52\xe9\x80\x17\x8b" +			     "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" +			     "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" +			     "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" +			     "\x55\xec\x83\x1a\x8e\x25\xbc\x30" +			     "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" +			     "\x39\xd0\x44\xdb\x72\x09\x7d\x14" +			     "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" +			     "\x1d\x91\x28\xbf\x33\xca\x61\xf8" +			     "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" +			     "\xde\x75\x0c\x80\x17\xae\x22\xb9" +			     "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" +			     "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" +			     "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" +			     "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" +			     "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" +			     "\x67\xfe\x72\x09\xa0\x14\xab\x42" +			     "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" +			     "\x28\xbf\x56\xed\x61\xf8\x8f\x03" +			     "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" +			     "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" +			     "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" +			     "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" +			     "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" +			     "\xb1\x48\xdf\x53\xea\x81\x18\x8c" +			     "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" +			     "\x95\x09\xa0\x37\xce\x42\xd9\x70" +			     "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" +			     "\x56\xed\x84\x1b\x8f\x26\xbd\x31" +			     "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" +			     "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" +			     "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" +			     "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" +			     "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" +			     "\xdf\x76\x0d\x81\x18\xaf\x23\xba" +			     "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" +			     "\xc3\x37\xce\x65\xfc\x70\x07\x9e" +			     "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" +			     "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" +			     "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" +			     "\x68\xff\x73\x0a\xa1\x15\xac\x43" +			     "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" +			     "\x29\xc0\x57\xee\x62\xf9\x90\x04" +			     "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" +			     "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" +			     "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" +			     "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" +			     "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" +			     "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" +			     "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" +			     "\x96\x0a\xa1\x38\xcf\x43\xda\x71" +			     "\x08\x7c\x13\xaa\x1e\xb5\x4c", +		.psize     = 1023, +		.digest    = "\x98\x43\x07\x63\x75\xe0\xa7\x1c" +			     "\x78\xb1\x8b\xfd\x04\xf5\x2d\x91" +			     "\x20\x48\xa4\x28\xff\x55\xb1\xd3" +			     "\xe6\xf9\x4f\xcc",  	}  };  /*   * SHA256 test vectors from from NIST   */ -#define 
SHA256_TEST_VECTORS	2 +#define SHA256_TEST_VECTORS	5  static struct hash_testvec sha256_tv_template[] = {  	{ +		.plaintext = "", +		.psize	= 0, +		.digest	= "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14" +			  "\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24" +			  "\x27\xae\x41\xe4\x64\x9b\x93\x4c" +			  "\xa4\x95\x99\x1b\x78\x52\xb8\x55", +	}, {  		.plaintext = "abc",  		.psize	= 3,  		.digest	= "\xba\x78\x16\xbf\x8f\x01\xcf\xea" @@ -581,16 +879,166 @@ static struct hash_testvec sha256_tv_template[] = {  			  "\xf6\xec\xed\xd4\x19\xdb\x06\xc1",  		.np	= 2,  		.tap	= { 28, 28 } -	}, +	}, { +		.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", +		.psize	= 64, +		.digest = "\xb5\xfe\xad\x56\x7d\xff\xcb\xa4" +			  "\x2c\x32\x29\x32\x19\xbb\xfb\xfa" +			  "\xd6\xff\x94\xa3\x72\x91\x85\x66" +			  "\x3b\xa7\x87\x77\x58\xa3\x40\x3a", +	}, { +		.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" +			     "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" +			     "\xec\x60\xf7\x8e\x02\x99\x30\xc7" +			     "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" +			     "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" +			     "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" +			     "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" +			     "\x03\x77\x0e\xa5\x19\xb0\x47\xde" +			     "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" +			     "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" +			     "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" +			     "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" +			     "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" +			     "\x69\x00\x97\x0b\xa2\x39\xd0\x44" +			     "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" +			     "\x4d\xe4\x58\xef\x86\x1d\x91\x28" +			     "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" +			     "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" +			     "\x80\x17\xae\x22\xb9\x50\xe7\x5b" +			     "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" +			     "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" +			     "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" +			     "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" +			     "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" +			     "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" +			     "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" +			     "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" +			     "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" +			     "\xae\x45\xdc\x50\xe7\x7e\x15\x89" +			     "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" +			     "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" +			     "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" +			     "\x53\xea\x81\x18\x8c\x23\xba\x2e" +			     "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" +			     "\x37\xce\x42\xd9\x70\x07\x7b\x12" +			     "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" +			     "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" +			     "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" +			     "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" +			     "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" +			     "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" +			     "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" +			     "\x81\x18\xaf\x23\xba\x51\xe8\x5c" +			     "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" +			     "\x65\xfc\x70\x07\x9e\x12\xa9\x40" +			     "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" +			     "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" +			     "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" +			     "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" +			     "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" +			     "\xee\x62\xf9\x90\x04\x9b\x32\xc9" +			     "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" +			     "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" +			     "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" +			     "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" +			     "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" +			     "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" +			     "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" +			     "\x38\xcf\x43\xda\x71\x08\x7c\x13" +			     "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" +			     "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" +			     
"\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" +			     "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" +			     "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" +			     "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" +			     "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" +			     "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" +			     "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" +			     "\x66\xfd\x71\x08\x9f\x13\xaa\x41" +			     "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" +			     "\x27\xbe\x55\xec\x60\xf7\x8e\x02" +			     "\x99\x30\xc7\x3b\xd2\x69\x00\x74" +			     "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" +			     "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" +			     "\xef\x63\xfa\x91\x05\x9c\x33\xca" +			     "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" +			     "\xb0\x47\xde\x52\xe9\x80\x17\x8b" +			     "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" +			     "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" +			     "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" +			     "\x55\xec\x83\x1a\x8e\x25\xbc\x30" +			     "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" +			     "\x39\xd0\x44\xdb\x72\x09\x7d\x14" +			     "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" +			     "\x1d\x91\x28\xbf\x33\xca\x61\xf8" +			     "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" +			     "\xde\x75\x0c\x80\x17\xae\x22\xb9" +			     "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" +			     "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" +			     "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" +			     "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" +			     "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" +			     "\x67\xfe\x72\x09\xa0\x14\xab\x42" +			     "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" +			     "\x28\xbf\x56\xed\x61\xf8\x8f\x03" +			     "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" +			     "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" +			     "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" +			     "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" +			     "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" +			     "\xb1\x48\xdf\x53\xea\x81\x18\x8c" +			     "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" +			     "\x95\x09\xa0\x37\xce\x42\xd9\x70" +			     "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" +			     "\x56\xed\x84\x1b\x8f\x26\xbd\x31" +			     "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" +			     "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" +			     "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" +			     "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" +			     "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" +			     "\xdf\x76\x0d\x81\x18\xaf\x23\xba" +			     "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" +			     "\xc3\x37\xce\x65\xfc\x70\x07\x9e" +			     "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" +			     "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" +			     "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" +			     "\x68\xff\x73\x0a\xa1\x15\xac\x43" +			     "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" +			     "\x29\xc0\x57\xee\x62\xf9\x90\x04" +			     "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" +			     "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" +			     "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" +			     "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" +			     "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" +			     "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" +			     "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" +			     "\x96\x0a\xa1\x38\xcf\x43\xda\x71" +			     "\x08\x7c\x13\xaa\x1e\xb5\x4c", +		.psize     = 1023, +		.digest    = "\xc5\xce\x0c\xca\x01\x4f\x53\x3a" +			     "\x32\x32\x17\xcc\xd4\x6a\x71\xa9" +			     "\xf3\xed\x50\x10\x64\x8e\x06\xbe" +			     "\x9b\x4a\xa6\xbb\x05\x89\x59\x51", +	}  };  /*   * SHA384 test vectors from from NIST and kerneli   */ -#define SHA384_TEST_VECTORS	4 +#define SHA384_TEST_VECTORS	6  static struct hash_testvec sha384_tv_template[] = {  	{ +		.plaintext = "", +		.psize	= 0, +		.digest	= "\x38\xb0\x60\xa7\x51\xac\x96\x38" +			  "\x4c\xd9\x32\x7e\xb1\xb1\xe3\x6a" +			  "\x21\xfd\xb7\x11\x14\xbe\x07\x43" +			  "\x4c\x0c\xc7\xbf\x63\xf6\xe1\xda" +			  
"\x27\x4e\xde\xbf\xe7\x6f\x65\xfb" +			  "\xd5\x1a\xd2\xf1\x48\x98\xb9\x5b", +	}, {  		.plaintext= "abc",  		.psize	= 3,  		.digest	= "\xcb\x00\x75\x3f\x45\xa3\x5e\x8b" @@ -630,16 +1078,163 @@ static struct hash_testvec sha384_tv_template[] = {  			  "\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4",  		.np	= 4,  		.tap	= { 26, 26, 26, 26 } -	}, +	}, { +		.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" +			     "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" +			     "\xec\x60\xf7\x8e\x02\x99\x30\xc7" +			     "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" +			     "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" +			     "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" +			     "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" +			     "\x03\x77\x0e\xa5\x19\xb0\x47\xde" +			     "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" +			     "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" +			     "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" +			     "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" +			     "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" +			     "\x69\x00\x97\x0b\xa2\x39\xd0\x44" +			     "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" +			     "\x4d\xe4\x58\xef\x86\x1d\x91\x28" +			     "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" +			     "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" +			     "\x80\x17\xae\x22\xb9\x50\xe7\x5b" +			     "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" +			     "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" +			     "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" +			     "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" +			     "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" +			     "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" +			     "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" +			     "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" +			     "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" +			     "\xae\x45\xdc\x50\xe7\x7e\x15\x89" +			     "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" +			     "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" +			     "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" +			     "\x53\xea\x81\x18\x8c\x23\xba\x2e" +			     "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" +			     "\x37\xce\x42\xd9\x70\x07\x7b\x12" +			     "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" +			     "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" +			     "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" +			     "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" +			     "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" +			     "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" +			     "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" +			     "\x81\x18\xaf\x23\xba\x51\xe8\x5c" +			     "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" +			     "\x65\xfc\x70\x07\x9e\x12\xa9\x40" +			     "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" +			     "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" +			     "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" +			     "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" +			     "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" +			     "\xee\x62\xf9\x90\x04\x9b\x32\xc9" +			     "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" +			     "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" +			     "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" +			     "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" +			     "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" +			     "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" +			     "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" +			     "\x38\xcf\x43\xda\x71\x08\x7c\x13" +			     "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" +			     "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" +			     "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" +			     "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" +			     "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" +			     "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" +			     "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" +			     "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" +			     "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" +			     "\x66\xfd\x71\x08\x9f\x13\xaa\x41" +			     "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" +			     "\x27\xbe\x55\xec\x60\xf7\x8e\x02" +			     "\x99\x30\xc7\x3b\xd2\x69\x00\x74" +			     
"\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" +			     "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" +			     "\xef\x63\xfa\x91\x05\x9c\x33\xca" +			     "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" +			     "\xb0\x47\xde\x52\xe9\x80\x17\x8b" +			     "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" +			     "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" +			     "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" +			     "\x55\xec\x83\x1a\x8e\x25\xbc\x30" +			     "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" +			     "\x39\xd0\x44\xdb\x72\x09\x7d\x14" +			     "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" +			     "\x1d\x91\x28\xbf\x33\xca\x61\xf8" +			     "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" +			     "\xde\x75\x0c\x80\x17\xae\x22\xb9" +			     "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" +			     "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" +			     "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" +			     "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" +			     "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" +			     "\x67\xfe\x72\x09\xa0\x14\xab\x42" +			     "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" +			     "\x28\xbf\x56\xed\x61\xf8\x8f\x03" +			     "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" +			     "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" +			     "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" +			     "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" +			     "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" +			     "\xb1\x48\xdf\x53\xea\x81\x18\x8c" +			     "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" +			     "\x95\x09\xa0\x37\xce\x42\xd9\x70" +			     "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" +			     "\x56\xed\x84\x1b\x8f\x26\xbd\x31" +			     "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" +			     "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" +			     "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" +			     "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" +			     "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" +			     "\xdf\x76\x0d\x81\x18\xaf\x23\xba" +			     "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" +			     "\xc3\x37\xce\x65\xfc\x70\x07\x9e" +			     "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" +			     "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" +			     "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" +			     "\x68\xff\x73\x0a\xa1\x15\xac\x43" +			     "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" +			     "\x29\xc0\x57\xee\x62\xf9\x90\x04" +			     "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" +			     "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" +			     "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" +			     "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" +			     "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" +			     "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" +			     "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" +			     "\x96\x0a\xa1\x38\xcf\x43\xda\x71" +			     "\x08\x7c\x13\xaa\x1e\xb5\x4c", +		.psize     = 1023, +		.digest    = "\x4d\x97\x23\xc8\xea\x7a\x7c\x15" +			     "\xb8\xff\x97\x9c\xf5\x13\x4f\x31" +			     "\xde\x67\xf7\x24\x73\xcd\x70\x1c" +			     "\x03\x4a\xba\x8a\x87\x49\xfe\xdc" +			     "\x75\x29\x62\x83\xae\x3f\x17\xab" +			     "\xfd\x10\x4d\x8e\x17\x1c\x1f\xca", +	}  };  /*   * SHA512 test vectors from from NIST and kerneli   */ -#define SHA512_TEST_VECTORS	4 +#define SHA512_TEST_VECTORS	6  static struct hash_testvec sha512_tv_template[] = {  	{ +		.plaintext = "", +		.psize	= 0, +		.digest	= "\xcf\x83\xe1\x35\x7e\xef\xb8\xbd" +			  "\xf1\x54\x28\x50\xd6\x6d\x80\x07" +			  "\xd6\x20\xe4\x05\x0b\x57\x15\xdc" +			  "\x83\xf4\xa9\x21\xd3\x6c\xe9\xce" +			  "\x47\xd0\xd1\x3c\x5d\x85\xf2\xb0" +			  "\xff\x83\x18\xd2\x87\x7e\xec\x2f" +			  "\x63\xb9\x31\xbd\x47\x41\x7a\x81" +			  "\xa5\x38\x32\x7a\xf9\x27\xda\x3e", +	}, {  		.plaintext = "abc",  		.psize	= 3,  		.digest	= "\xdd\xaf\x35\xa1\x93\x61\x7a\xba" @@ -687,7 +1282,145 @@ static struct hash_testvec sha512_tv_template[] = {  			  "\xed\xb4\x19\x87\x23\x28\x50\xc9",  		.np	= 4,  		.tap	= { 26, 26, 26, 26 } 
-	}, +	}, { +		.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" +			     "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" +			     "\xec\x60\xf7\x8e\x02\x99\x30\xc7" +			     "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" +			     "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" +			     "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" +			     "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" +			     "\x03\x77\x0e\xa5\x19\xb0\x47\xde" +			     "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" +			     "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" +			     "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" +			     "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" +			     "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" +			     "\x69\x00\x97\x0b\xa2\x39\xd0\x44" +			     "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" +			     "\x4d\xe4\x58\xef\x86\x1d\x91\x28" +			     "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" +			     "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" +			     "\x80\x17\xae\x22\xb9\x50\xe7\x5b" +			     "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" +			     "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" +			     "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" +			     "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" +			     "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" +			     "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" +			     "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" +			     "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" +			     "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" +			     "\xae\x45\xdc\x50\xe7\x7e\x15\x89" +			     "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" +			     "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" +			     "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" +			     "\x53\xea\x81\x18\x8c\x23\xba\x2e" +			     "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" +			     "\x37\xce\x42\xd9\x70\x07\x7b\x12" +			     "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" +			     "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" +			     "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" +			     "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" +			     "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" +			     "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" +			     "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" +			     "\x81\x18\xaf\x23\xba\x51\xe8\x5c" +			     "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" +			     "\x65\xfc\x70\x07\x9e\x12\xa9\x40" +			     "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" +			     "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" +			     "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" +			     "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" +			     "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" +			     "\xee\x62\xf9\x90\x04\x9b\x32\xc9" +			     "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" +			     "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" +			     "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" +			     "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" +			     "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" +			     "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" +			     "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" +			     "\x38\xcf\x43\xda\x71\x08\x7c\x13" +			     "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" +			     "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" +			     "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" +			     "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" +			     "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" +			     "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" +			     "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" +			     "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" +			     "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" +			     "\x66\xfd\x71\x08\x9f\x13\xaa\x41" +			     "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" +			     "\x27\xbe\x55\xec\x60\xf7\x8e\x02" +			     "\x99\x30\xc7\x3b\xd2\x69\x00\x74" +			     "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" +			     "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" +			     "\xef\x63\xfa\x91\x05\x9c\x33\xca" +			     "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" +			     "\xb0\x47\xde\x52\xe9\x80\x17\x8b" +			     "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" +			     "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" +			     "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" +			     
"\x55\xec\x83\x1a\x8e\x25\xbc\x30" +			     "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" +			     "\x39\xd0\x44\xdb\x72\x09\x7d\x14" +			     "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" +			     "\x1d\x91\x28\xbf\x33\xca\x61\xf8" +			     "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" +			     "\xde\x75\x0c\x80\x17\xae\x22\xb9" +			     "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" +			     "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" +			     "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" +			     "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" +			     "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" +			     "\x67\xfe\x72\x09\xa0\x14\xab\x42" +			     "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" +			     "\x28\xbf\x56\xed\x61\xf8\x8f\x03" +			     "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" +			     "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" +			     "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" +			     "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" +			     "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" +			     "\xb1\x48\xdf\x53\xea\x81\x18\x8c" +			     "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" +			     "\x95\x09\xa0\x37\xce\x42\xd9\x70" +			     "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" +			     "\x56\xed\x84\x1b\x8f\x26\xbd\x31" +			     "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" +			     "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" +			     "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" +			     "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" +			     "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" +			     "\xdf\x76\x0d\x81\x18\xaf\x23\xba" +			     "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" +			     "\xc3\x37\xce\x65\xfc\x70\x07\x9e" +			     "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" +			     "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" +			     "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" +			     "\x68\xff\x73\x0a\xa1\x15\xac\x43" +			     "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" +			     "\x29\xc0\x57\xee\x62\xf9\x90\x04" +			     "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" +			     "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" +			     "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" +			     "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" +			     "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" +			     "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" +			     "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" +			     "\x96\x0a\xa1\x38\xcf\x43\xda\x71" +			     "\x08\x7c\x13\xaa\x1e\xb5\x4c", +		.psize     = 1023, +		.digest    = "\x76\xc9\xd4\x91\x7a\x5f\x0f\xaa" +			     "\x13\x39\xf3\x01\x7a\xfa\xe5\x41" +			     "\x5f\x0b\xf8\xeb\x32\xfc\xbf\xb0" +			     "\xfa\x8c\xcd\x17\x83\xe2\xfa\xeb" +			     "\x1c\x19\xde\xe2\x75\xdc\x34\x64" +			     "\x5f\x35\x9c\x61\x2f\x10\xf9\xec" +			     "\x59\xca\x9d\xcc\x25\x0c\x43\xba" +			     "\x85\xa8\xf8\xfe\xb5\x24\xb2\xee", +	}  }; @@ -12821,9 +13554,13 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {  #define AES_DEC_TEST_VECTORS 4  #define AES_CBC_ENC_TEST_VECTORS 5  #define AES_CBC_DEC_TEST_VECTORS 5 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7 +#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2 +#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7 +#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7 +#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7  #define AES_LRW_ENC_TEST_VECTORS 8  #define AES_LRW_DEC_TEST_VECTORS 8  #define AES_XTS_ENC_TEST_VECTORS 5 @@ -12840,7 +13577,7 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {  #define AES_GCM_4106_DEC_TEST_VECTORS 7  #define AES_GCM_4543_ENC_TEST_VECTORS 1  #define AES_GCM_4543_DEC_TEST_VECTORS 2 -#define AES_CCM_ENC_TEST_VECTORS 7 +#define AES_CCM_ENC_TEST_VECTORS 8  #define 
AES_CCM_DEC_TEST_VECTORS 7  #define AES_CCM_4309_ENC_TEST_VECTORS 7  #define AES_CCM_4309_DEC_TEST_VECTORS 10 @@ -13627,7 +14364,91 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {  	},  }; -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = { +static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { +	{ /* Input data from RFC 2410 Case 1 */ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00", +		.klen   = 8 + 16 + 0, +		.iv     = "", +		.input  = "\x01\x23\x45\x67\x89\xab\xcd\xef", +		.ilen   = 8, +		.result = "\x01\x23\x45\x67\x89\xab\xcd\xef" +			  "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" +			  "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", +		.rlen   = 8 + 16, +	}, { /* Input data from RFC 2410 Case 2 */ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00", +		.klen   = 8 + 16 + 0, +		.iv     = "", +		.input  = "Network Security People Have A Strange Sense Of Humor", +		.ilen   = 53, +		.result = "Network Security People Have A Strange Sense Of Humor" +			  "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" +			  "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", +		.rlen   = 53 + 16, +	}, +}; + +static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { +	{ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00", +		.klen   = 8 + 16 + 0, +		.iv     = "", +		.input  = "\x01\x23\x45\x67\x89\xab\xcd\xef" +			  "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" +			  "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", +		.ilen   = 8 + 16, +		.result = "\x01\x23\x45\x67\x89\xab\xcd\xef", +		.rlen   = 8, +	}, { +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00", +		.klen   = 8 + 16 + 0, +		.iv     = "", +		.input  = "Network Security People Have A Strange Sense Of Humor" +			  "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" +			  "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", +		.ilen   = 53 + 16, +		.result = "Network Security People Have A Strange Sense Of Humor", +		.rlen   = 53, +	}, +}; + +static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {  	{ /* RFC 3602 Case 1 */  #ifdef __LITTLE_ENDIAN  		.key    = "\x08\x00"		/* rta length */ @@ -13876,7 +14697,99 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = {  	},  }; -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = { +static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { +	{ /* Input data from RFC 2410 Case 1 */ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta 
type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00", +		.klen   = 8 + 20 + 0, +		.iv     = "", +		.input  = "\x01\x23\x45\x67\x89\xab\xcd\xef", +		.ilen   = 8, +		.result = "\x01\x23\x45\x67\x89\xab\xcd\xef" +			  "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" +			  "\x99\x5e\x19\x04\xd1\x72\xef\xb8" +			  "\x8c\x5e\xe4\x08", +		.rlen   = 8 + 20, +	}, { /* Input data from RFC 2410 Case 2 */ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00", +		.klen   = 8 + 20 + 0, +		.iv     = "", +		.input  = "Network Security People Have A Strange Sense Of Humor", +		.ilen   = 53, +		.result = "Network Security People Have A Strange Sense Of Humor" +			  "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" +			  "\x65\x47\xee\x8e\x1a\xef\x16\xf6" +			  "\x91\x56\xe4\xd6", +		.rlen   = 53 + 20, +	}, +}; + +static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = { +	{ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00", +		.klen   = 8 + 20 + 0, +		.iv     = "", +		.input  = "\x01\x23\x45\x67\x89\xab\xcd\xef" +			  "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" +			  "\x99\x5e\x19\x04\xd1\x72\xef\xb8" +			  "\x8c\x5e\xe4\x08", +		.ilen   = 8 + 20, +		.result = "\x01\x23\x45\x67\x89\xab\xcd\xef", +		.rlen   = 8, +	}, { +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x00"	/* enc key length */ +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00\x00\x00\x00\x00" +			  "\x00\x00\x00\x00", +		.klen   = 8 + 20 + 0, +		.iv     = "", +		.input  = "Network Security People Have A Strange Sense Of Humor" +			  "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" +			  "\x65\x47\xee\x8e\x1a\xef\x16\xf6" +			  "\x91\x56\xe4\xd6", +		.ilen   = 53 + 20, +		.result = "Network Security People Have A Strange Sense Of Humor", +		.rlen   = 53, +	}, +}; + +static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {  	{ /* RFC 3602 Case 1 */  #ifdef __LITTLE_ENDIAN  		.key    = "\x08\x00"		/* rta length */ @@ -14139,7 +15052,7 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = {  	},  }; -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_template[] = { +static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {  	{ /* RFC 3602 Case 1 */  #ifdef __LITTLE_ENDIAN  		.key    = "\x08\x00"		/* rta length */ @@ -14458,6 +15371,652 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_template[] = {  	},  }; +#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +	.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x08"	/* enc key length */ +			  
"\x11\x22\x33\x44\x55\x66\x77\x88" +		  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", +		.klen	= 8 + 20 + 8, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x70\xd6\xde\x64\x87\x17\xf1\xe8" +			  "\x54\x31\x85\x37\xed\x6b\x01\x8d" +			  "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" +			  "\x41\xaa\x33\x91\xa7\x7d\x99\x88" +			  "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" +			  "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" +			  "\xaa\x9c\x11\xd5\x76\x67\xce\xde" +			  "\x56\xd7\x5a\x80\x69\xea\x3a\x02" +			  "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" +			  "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" +			  "\xe3\x26\x1f\xe1\x15\x41\xc7\xba" +			  "\x99\xdb\x08\x51\x1c\xd3\x01\xf4" +			  "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" +			  "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" +			  "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" +			  "\x53\xba\xe1\x76\xe3\x82\x07\x86" +			  "\x95\x16\x20\x09\xf5\x95\x19\xfd" +			  "\x3c\xc7\xe0\x42\xc0\x14\x69\xfa" +			  "\x5c\x44\xa9\x37", +			  .rlen	= 128 + 20, +	}, +}; + +#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x08"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +		  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", +		.klen	= 8 + 24 + 8, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x70\xd6\xde\x64\x87\x17\xf1\xe8" +			  "\x54\x31\x85\x37\xed\x6b\x01\x8d" +			  "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" +			  "\x41\xaa\x33\x91\xa7\x7d\x99\x88" +			  "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" +			  "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" +			  "\xaa\x9c\x11\xd5\x76\x67\xce\xde" +			  "\x56\xd7\x5a\x80\x69\xea\x3a\x02" +			  "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" +			  "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" +			  
"\xe3\x26\x1f\xe1\x15\x41\xc7\xba" +			  "\x99\xdb\x08\x51\x1c\xd3\x01\xf4" +		  "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" +			  "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" +			  "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" +			  "\x53\xba\xe1\x76\xe3\x82\x07\x86" +			  "\x9c\x2d\x7e\xee\x20\x34\x55\x0a" +			  "\xce\xb5\x4e\x64\x53\xe7\xbf\x91" +			  "\xab\xd4\xd9\xda\xc9\x12\xae\xf7", +		.rlen	= 128 + 24, +	}, +}; + +#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x08"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", +		.klen	= 8 + 32 + 8, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x70\xd6\xde\x64\x87\x17\xf1\xe8" +			  "\x54\x31\x85\x37\xed\x6b\x01\x8d" +			  "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" +			  "\x41\xaa\x33\x91\xa7\x7d\x99\x88" +			  "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" +			  "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" +			  "\xaa\x9c\x11\xd5\x76\x67\xce\xde" +			  "\x56\xd7\x5a\x80\x69\xea\x3a\x02" +			  "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" +			  "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" +			  "\xe3\x26\x1f\xe1\x15\x41\xc7\xba" +		  "\x99\xdb\x08\x51\x1c\xd3\x01\xf4" +			  "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" +		  "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" +		  "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" +			  "\x53\xba\xe1\x76\xe3\x82\x07\x86" +			  "\xc6\x58\xa1\x60\x70\x91\x39\x36" +			  "\x50\xf6\x5d\xab\x4b\x51\x4e\x5e" +			  "\xde\x63\xde\x76\x52\xde\x9f\xba" +			  "\x90\xcf\x15\xf2\xbb\x6e\x84\x00", +		.rlen	= 128 + 32, +	}, +}; + +#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x08"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\x33\x44\x55\x66\x77\x88\x99\xaa" +			  "\xbb\xcc\xdd\xee\xff\x11\x22\x33" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", +		.klen	= 8 + 48 + 8, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x70\xd6\xde\x64\x87\x17\xf1\xe8" +			  "\x54\x31\x85\x37\xed\x6b\x01\x8d" +			  "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" +			  "\x41\xaa\x33\x91\xa7\x7d\x99\x88" +			  "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" +			  "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" +			  "\xaa\x9c\x11\xd5\x76\x67\xce\xde" +			  "\x56\xd7\x5a\x80\x69\xea\x3a\x02" +			  "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" +			  "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" +			  "\xe3\x26\x1f\xe1\x15\x41\xc7\xba" +			  "\x99\xdb\x08\x51\x1c\xd3\x01\xf4" +			  "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" +			  "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" +			  "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" +			  "\x53\xba\xe1\x76\xe3\x82\x07\x86" +			  "\xa8\x8e\x9c\x74\x8c\x2b\x99\xa0" +			  "\xc8\x8c\xef\x25\x07\x83\x11\x3a" +			  "\x31\x8d\xbe\x3b\x6a\xd7\x96\xfe" +			  "\x5e\x67\xb5\x74\xe7\xe7\x85\x61" +			  "\x6a\x95\x26\x75\xcc\x53\x89\xf3" +			  "\x74\xc9\x2a\x76\x20\xa2\x64\x62", +		.rlen	= 128 + 48, +	}, +}; + +#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +		  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x08"	/* enc key length */ +		  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\x33\x44\x55\x66\x77\x88\x99\xaa" +			  "\xbb\xcc\xdd\xee\xff\x11\x22\x33" +			  "\x44\x55\x66\x77\x88\x99\xaa\xbb" +			  "\xcc\xdd\xee\xff\x11\x22\x33\x44" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", +		.klen	= 8 + 64 + 8, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x70\xd6\xde\x64\x87\x17\xf1\xe8" +			  "\x54\x31\x85\x37\xed\x6b\x01\x8d" +			  "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" +			  "\x41\xaa\x33\x91\xa7\x7d\x99\x88" +			  "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82" +			  "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b" +			  "\xaa\x9c\x11\xd5\x76\x67\xce\xde" +		  "\x56\xd7\x5a\x80\x69\xea\x3a\x02" +			  "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52" +		  "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1" +			  "\xe3\x26\x1f\xe1\x15\x41\xc7\xba" +			  
"\x99\xdb\x08\x51\x1c\xd3\x01\xf4" +			  "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb" +			  "\x66\x13\xdf\x1c\x01\x44\xf0\x7a" +		  "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba" +			  "\x53\xba\xe1\x76\xe3\x82\x07\x86" +			  "\xc6\x2c\x73\x88\xb0\x9d\x5f\x3e" +			  "\x5b\x78\xca\x0e\xab\x8a\xa3\xbb" +			  "\xd9\x1d\xc3\xe3\x05\xac\x76\xfb" +			  "\x58\x83\xda\x67\xfb\x21\x24\xa2" +			  "\xb1\xa7\xd7\x66\xa6\x8d\xa6\x93" +			  "\x97\xe2\xe3\xb8\xaa\x48\x85\xee" +			  "\x8c\xf6\x07\x95\x1f\xa6\x6c\x96" +			  "\x99\xc7\x5c\x8d\xd8\xb5\x68\x7b", +		.rlen	= 128 + 64, +	}, +}; + +#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x18"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55" +		  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" +			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", +		.klen	= 8 + 20 + 24, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +		  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" +			  "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" +		  "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" +		  "\x12\x56\x5c\x53\x96\xb6\x00\x7d" +		  "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" +			  "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" +			  "\x76\xd1\xda\x0c\x94\x67\xbb\x04" +			  "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" +		  "\x22\x64\x47\xaa\x8f\x75\x13\xbf" +			  "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" +			  "\x71\x63\x2e\x89\x7b\x1e\x12\xca" +			  "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" +			  "\xd6\xf9\x21\x31\x62\x44\x45\xa6" +			  "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" +			  "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" +			  "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19" +			  "\x67\x6d\xb1\xf5\xb8\x10\xdc\xc6" +			  "\x75\x86\x96\x6b\xb1\xc5\xe4\xcf" +			  "\xd1\x60\x91\xb3", +			  .rlen	= 128 + 20, +	}, +}; + +#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x18"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" +			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", +		.klen	= 8 + 24 + 24, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 
8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +		  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" +		  "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" +			  "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" +			  "\x12\x56\x5c\x53\x96\xb6\x00\x7d" +			  "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" +			  "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" +			  "\x76\xd1\xda\x0c\x94\x67\xbb\x04" +		  "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" +			  "\x22\x64\x47\xaa\x8f\x75\x13\xbf" +			  "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" +			  "\x71\x63\x2e\x89\x7b\x1e\x12\xca" +			  "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" +			  "\xd6\xf9\x21\x31\x62\x44\x45\xa6" +		  "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" +			  "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" +		  "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19" +			  "\x15\x24\x7f\x5a\x45\x4a\x66\xce" +			  "\x2b\x0b\x93\x99\x2f\x9d\x0c\x6c" +			  "\x56\x1f\xe1\xa6\x41\xb2\x4c\xd0", +			  .rlen	= 128 + 24, +	}, +}; + +#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x18"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" +			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", +		.klen	= 8 + 32 + 24, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" +			  "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" +			  "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" +			  "\x12\x56\x5c\x53\x96\xb6\x00\x7d" +			  "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" +			  "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" +			  "\x76\xd1\xda\x0c\x94\x67\xbb\x04" +			  "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" +			  "\x22\x64\x47\xaa\x8f\x75\x13\xbf" +			  "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" +			  "\x71\x63\x2e\x89\x7b\x1e\x12\xca" +			  "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" +			  "\xd6\xf9\x21\x31\x62\x44\x45\xa6" +			  
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" +			  "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" +			  "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19" +			  "\x73\xb0\xea\x9f\xe8\x18\x80\xd6" +			  "\x56\x38\x44\xc0\xdb\xe3\x4f\x71" +			  "\xf7\xce\xd1\xd3\xf8\xbd\x3e\x4f" +			  "\xca\x43\x95\xdf\x80\x61\x81\xa9", +		.rlen	= 128 + 32, +	}, +}; + +#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x18"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\x33\x44\x55\x66\x77\x88\x99\xaa" +			  "\xbb\xcc\xdd\xee\xff\x11\x22\x33" +			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" +			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", +		.klen	= 8 + 48 + 24, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +	.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +			  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" +			  "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" +			  "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" +			  "\x12\x56\x5c\x53\x96\xb6\x00\x7d" +			  "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" +			  "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" +			  "\x76\xd1\xda\x0c\x94\x67\xbb\x04" +			  "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" +			  "\x22\x64\x47\xaa\x8f\x75\x13\xbf" +			  "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" +			  "\x71\x63\x2e\x89\x7b\x1e\x12\xca" +			  "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" +			  "\xd6\xf9\x21\x31\x62\x44\x45\xa6" +			  "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" +			  "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" +			  "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19" +			  "\x6d\x77\xfc\x80\x9d\x8a\x9c\xb7" +		  "\x70\xe7\x93\xbf\x73\xe6\x9f\x83" +			  "\x99\x62\x23\xe6\x5b\xd0\xda\x18" +			  "\xa4\x32\x8a\x0b\x46\xd7\xf0\x39" +			  "\x36\x5d\x13\x2f\x86\x10\x78\xd6" +			  "\xd6\xbe\x5c\xb9\x15\x89\xf9\x1b", +		.rlen	= 128 + 48, +	}, +}; + +#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC	1 + +static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { +	{ /*Generated with cryptopp*/ +#ifdef __LITTLE_ENDIAN +		.key    = "\x08\x00"		/* rta length */ +			  "\x01\x00"		/* rta type */ +#else +		.key    = "\x00\x08"		/* rta length */ +			  "\x00\x01"		/* rta type */ +#endif +			  "\x00\x00\x00\x18"	/* enc key length */ +			  "\x11\x22\x33\x44\x55\x66\x77\x88" +			  "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" +			  "\x22\x33\x44\x55\x66\x77\x88\x99" +			  "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" +			  "\x33\x44\x55\x66\x77\x88\x99\xaa" +			  "\xbb\xcc\xdd\xee\xff\x11\x22\x33" +			  "\x44\x55\x66\x77\x88\x99\xaa\xbb" +			  "\xcc\xdd\xee\xff\x11\x22\x33\x44" +			  
"\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" +		  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" +			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", +		.klen	= 8 + 64 + 24, +		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42", +		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01", +		.alen   = 8, +		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" +			  "\x53\x20\x63\x65\x65\x72\x73\x74" +			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" +			  "\x20\x79\x65\x53\x72\x63\x74\x65" +			  "\x20\x73\x6f\x54\x20\x6f\x61\x4d" +			  "\x79\x6e\x53\x20\x63\x65\x65\x72" +			  "\x73\x74\x54\x20\x6f\x6f\x4d\x20" +			  "\x6e\x61\x20\x79\x65\x53\x72\x63" +		  "\x74\x65\x20\x73\x6f\x54\x20\x6f" +			  "\x61\x4d\x79\x6e\x53\x20\x63\x65" +			  "\x65\x72\x73\x74\x54\x20\x6f\x6f" +			  "\x4d\x20\x6e\x61\x20\x79\x65\x53" +			  "\x72\x63\x74\x65\x20\x73\x6f\x54" +			  "\x20\x6f\x61\x4d\x79\x6e\x53\x20" +			  "\x63\x65\x65\x72\x73\x74\x54\x20" +			  "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", +		.ilen	= 128, +		.result	= "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" +			  "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" +			  "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" +			  "\x12\x56\x5c\x53\x96\xb6\x00\x7d" +			  "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" +			  "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" +			  "\x76\xd1\xda\x0c\x94\x67\xbb\x04" +			  "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" +			  "\x22\x64\x47\xaa\x8f\x75\x13\xbf" +			  "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" +			  "\x71\x63\x2e\x89\x7b\x1e\x12\xca" +			  "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" +			  "\xd6\xf9\x21\x31\x62\x44\x45\xa6" +			  "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" +			  "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" +			  "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19" +			  "\x41\xb5\x1f\xbb\xbd\x4e\xb8\x32" +			  "\x22\x86\x4e\x57\x1b\x2a\xd8\x6e" +			  "\xa9\xfb\xc8\xf3\xbf\x2d\xae\x2b" +			  "\x3b\xbc\x41\xe8\x38\xbb\xf1\x60" +			  "\x4c\x68\xa9\x4e\x8c\x73\xa7\xc0" +			  "\x2a\x74\xd4\x65\x12\xcb\x55\xf2" +			  "\xd5\x02\x6d\xe6\xaf\xc9\x2f\xf2" +			  "\x57\xaa\x85\xf7\xf3\x6a\xcb\xdb", +		.rlen	= 128 + 64, +	}, +}; +  static struct cipher_testvec aes_lrw_enc_tv_template[] = {  	/* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */  	{ /* LRW-32-AES 1 */ @@ -18566,7 +20125,29 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {  			  "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"  			  "\xba",  		.rlen	= 33, -	}, +	}, { +		/* +		 * This is the same vector as aes_ccm_rfc4309_enc_tv_template[0] +		 * below but rewritten to use the ccm algorithm directly. +		 */ +		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05" +			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", +		.klen	= 16, +		.iv	= "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0", +		.alen	= 0, +		.input	= "\x19\xc8\x81\xf6\xe9\x86\xff\x93" +			  "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" +			  "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" +			  "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", +		.ilen	= 32, +		.result	= "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" +			  "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" +			  "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" +			  "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" +			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd" +			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", +		.rlen	= 48, +	}  };  static struct aead_testvec aes_ccm_dec_tv_template[] = {  | 

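Note on the new zero-length hash vectors: the empty-message digests added to sha1_tv_template, sha224_tv_template, sha256_tv_template, sha384_tv_template and sha512_tv_template are easy to cross-check outside the kernel. A minimal sketch, assuming OpenSSL's one-shot SHA1()/SHA256() helpers are available (nothing below is part of the patch itself):

/* Cross-check of the zero-length vectors: SHA1("") and SHA256("")
 * must equal the .digest bytes of the first template entries above.
 * Build with: cc check_empty.c -lcrypto  (file name illustrative)
 */
#include <stdio.h>
#include <openssl/sha.h>

int main(void)
{
	unsigned char md[SHA256_DIGEST_LENGTH];
	unsigned int i;

	SHA1((const unsigned char *)"", 0, md);
	for (i = 0; i < SHA_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");	/* expect da39a3ee...afd80709 */

	SHA256((const unsigned char *)"", 0, md);
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");	/* expect e3b0c442...7852b855 */
	return 0;
}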
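Note on the authenc() key encoding: every aead_testvec .key added above uses the same framing, which is why the .klen values read as sums like 8 + 20 + 8. The blob is a native-endian rtattr header (length 8, type 1, i.e. CRYPTO_AUTHENC_KEYA_PARAM, hence the __LITTLE_ENDIAN conditionals), then a big-endian 32-bit encryption-key length (struct crypto_authenc_key_param's __be32 field), then the MAC key, then the cipher key. A minimal user-space sketch of that layout; the helper name and local struct are illustrative, not kernel API:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

struct rta_hdr {		/* stands in for struct rtattr */
	uint16_t rta_len;	/* native endian: 8 */
	uint16_t rta_type;	/* CRYPTO_AUTHENC_KEYA_PARAM == 1 */
};

/* Assemble: rtattr header + be32 enckeylen + MAC key + cipher key.
 * Caller provides a buffer of at least 8 + authkeylen + enckeylen bytes.
 */
static size_t build_authenc_key(uint8_t *out,
				const uint8_t *authkey, size_t authkeylen,
				const uint8_t *enckey, size_t enckeylen)
{
	struct rta_hdr hdr = { .rta_len = 8, .rta_type = 1 };
	uint32_t be_enckeylen = htonl((uint32_t)enckeylen);
	size_t off = 0;

	memcpy(out + off, &hdr, sizeof(hdr));	off += sizeof(hdr);
	memcpy(out + off, &be_enckeylen, 4);	off += 4;
	memcpy(out + off, authkey, authkeylen);	off += authkeylen;
	memcpy(out + off, enckey, enckeylen);	off += enckeylen;
	return off;	/* matches .klen = 8 + authkeylen + enckeylen */
}

For the hmac(sha1),cbc(des) vector this gives authkeylen = 20 and enckeylen = 8, i.e. .klen = 8 + 20 + 8; the cipher_null entries are the degenerate case enckeylen = 0.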
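Note on the RFC 2410 cipher_null vectors: since ecb(cipher_null) is a pass-through, each .result is simply the input with the HMAC tag appended (.rlen = ilen + 16 for hmac(md5), ilen + 20 for hmac(sha1)), i.e. authentication-only ESP. The MD5 tag can be reproduced with OpenSSL's one-shot HMAC(), assuming the all-zero 16-byte authentication key used in the vectors (again, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
	static const unsigned char key[16];	/* all-zero, as in the vectors */
	const char *msg = "Network Security People Have A Strange Sense Of Humor";
	unsigned char tag[EVP_MAX_MD_SIZE];
	unsigned int taglen, i;

	/* Tag appended to the plaintext in
	 * hmac_md5_ecb_cipher_null_enc_tv_template[1] (RFC 2410 Case 2).
	 */
	HMAC(EVP_md5(), key, sizeof(key),
	     (const unsigned char *)msg, strlen(msg), tag, &taglen);
	for (i = 0; i < taglen; i++)
		printf("%02x", tag[i]);	/* expect 73a53e1c...8efe1323 */
	printf("\n");
	return 0;
}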
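Note on the extra ccm(aes) vector: the new entry duplicates aes_ccm_rfc4309_enc_tv_template[0] with the rfc4309 template layer peeled off, so its 16-byte .iv is a raw RFC 3610 counter-block prefix. Byte 0 carries L - 1 (0x03, i.e. a 4-octet length field and a 15 - 4 = 11-octet nonce), bytes 1..11 carry the nonce (the RFC 4309 3-byte salt followed by the 8-byte per-packet IV), and the last 4 bytes are the zeroed block counter. A sketch of that packing; the function name is illustrative and the layout mirrors what the rfc4309 wrapper builds internally:

#include <stdint.h>
#include <string.h>

/* Pack an rfc4309-style (salt, per-packet IV) pair into the raw
 * 16-byte CCM IV format used by the new vector above.
 */
static void ccm_iv_from_rfc4309(uint8_t iv[16],
				const uint8_t salt[3],
				const uint8_t pkt_iv[8])
{
	iv[0] = 3;			/* L - 1: 4-octet length field */
	memcpy(iv + 1, salt, 3);	/* 11-octet nonce: salt first ... */
	memcpy(iv + 4, pkt_iv, 8);	/* ... then the per-packet IV */
	memset(iv + 12, 0, 4);		/* block counter, initially zero */
}

Applied to the vector above: salt 96 ac 59 and per-packet IV 30 07 a1 e2 a2 c7 55 24 yield the .iv = "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0" shown.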