Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r-- | arch/x86/crypto/aesni-intel_glue.c | 1482
1 file changed, 1080 insertions, 402 deletions
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 2cb3dcc4490..948ad0e7774 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -5,6 +5,14 @@   * Copyright (C) 2008, Intel Corp.   *    Author: Huang Ying <ying.huang@intel.com>   * + * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD + * interface for 64-bit kernels. + *    Authors: Adrian Hoban <adrian.hoban@intel.com> + *             Gabriele Paoloni <gabriele.paoloni@intel.com> + *             Tadeusz Struk (tadeusz.struk@intel.com) + *             Aidan O'Mahony (aidan.o.mahony@intel.com) + *    Copyright (c) 2010, Intel Corporation. + *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation; either version 2 of the License, or @@ -14,36 +22,66 @@  #include <linux/hardirq.h>  #include <linux/types.h>  #include <linux/crypto.h> +#include <linux/module.h>  #include <linux/err.h>  #include <crypto/algapi.h>  #include <crypto/aes.h>  #include <crypto/cryptd.h>  #include <crypto/ctr.h> +#include <crypto/b128ops.h> +#include <crypto/lrw.h> +#include <crypto/xts.h> +#include <asm/cpu_device_id.h>  #include <asm/i387.h> -#include <asm/aes.h> - -#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE) -#define HAS_CTR -#endif - -#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE) -#define HAS_LRW +#include <asm/crypto/aes.h> +#include <crypto/ablk_helper.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/aead.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#ifdef CONFIG_X86_64 +#include <asm/crypto/glue_helper.h>  #endif  #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)  #define HAS_PCBC  #endif -#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE) -#define HAS_XTS -#endif +/* This data is stored at the end of the crypto_tfm struct. + * It's a type of per "session" data storage location. + * This needs to be 16 byte aligned. 
+ */ +struct aesni_rfc4106_gcm_ctx { +	u8 hash_subkey[16]; +	struct crypto_aes_ctx aes_key_expanded; +	u8 nonce[4]; +	struct cryptd_aead *cryptd_tfm; +}; + +struct aesni_gcm_set_hash_subkey_result { +	int err; +	struct completion completion; +}; -struct async_aes_ctx { -	struct cryptd_ablkcipher *cryptd_tfm; +struct aesni_hash_subkey_req_data { +	u8 iv[16]; +	struct aesni_gcm_set_hash_subkey_result result; +	struct scatterlist sg;  }; -#define AESNI_ALIGN	16 +#define AESNI_ALIGN	(16)  #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1)) +#define RFC4106_HASH_SUBKEY_SIZE 16 + +struct aesni_lrw_ctx { +	struct lrw_table_ctx lrw_table; +	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; +}; + +struct aesni_xts_ctx { +	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; +	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; +};  asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,  			     unsigned int key_len); @@ -59,9 +97,189 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,  			      const u8 *in, unsigned int len, u8 *iv);  asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,  			      const u8 *in, unsigned int len, u8 *iv); + +int crypto_fpu_init(void); +void crypto_fpu_exit(void); + +#define AVX_GEN2_OPTSIZE 640 +#define AVX_GEN4_OPTSIZE 4096 + +#ifdef CONFIG_X86_64  asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,  			      const u8 *in, unsigned int len, u8 *iv); +asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, +				 const u8 *in, bool enc, u8 *iv); + +/* asmlinkage void aesni_gcm_enc() + * void *ctx,  AES Key schedule. Starts on a 16 byte boundary. + * u8 *out, Ciphertext output. Encrypt in-place is allowed. + * const u8 *in, Plaintext input + * unsigned long plaintext_len, Length of data in bytes for encryption. + * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) + *         concatenated with 8 byte Initialisation Vector (from IPSec ESP + *         Payload) concatenated with 0x00000001. 16-byte aligned pointer. + * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. + * const u8 *aad, Additional Authentication Data (AAD) + * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this + *          is going to be 8 or 12 bytes + * u8 *auth_tag, Authenticated Tag output. + * unsigned long auth_tag_len), Authenticated Tag Length in bytes. + *          Valid values are 16 (most likely), 12 or 8. + */ +asmlinkage void aesni_gcm_enc(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +/* asmlinkage void aesni_gcm_dec() + * void *ctx, AES Key schedule. Starts on a 16 byte boundary. + * u8 *out, Plaintext output. Decrypt in-place is allowed. + * const u8 *in, Ciphertext input + * unsigned long ciphertext_len, Length of data in bytes for decryption. + * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) + *         concatenated with 8 byte Initialisation Vector (from IPSec ESP + *         Payload) concatenated with 0x00000001. 16-byte aligned pointer. + * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. + * const u8 *aad, Additional Authentication Data (AAD) + * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going + * to be 8 or 12 bytes + * u8 *auth_tag, Authenticated Tag output. 
+ * unsigned long auth_tag_len) Authenticated Tag Length in bytes. + * Valid values are 16 (most likely), 12 or 8. + */ +asmlinkage void aesni_gcm_dec(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + + +#ifdef CONFIG_AS_AVX +/* + * asmlinkage void aesni_gcm_precomp_avx_gen2() + * gcm_data *my_ctx_data, context data + * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary. + */ +asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey); + +asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +static void aesni_gcm_enc_avx(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len) +{ +	if (plaintext_len < AVX_GEN2_OPTSIZE) { +		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, +				aad_len, auth_tag, auth_tag_len); +	} else { +		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); +		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} +} + +static void aesni_gcm_dec_avx(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len) +{ +	if (ciphertext_len < AVX_GEN2_OPTSIZE) { +		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, +				aad_len, auth_tag, auth_tag_len); +	} else { +		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); +		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} +} +#endif + +#ifdef CONFIG_AS_AVX2 +/* + * asmlinkage void aesni_gcm_precomp_avx_gen4() + * gcm_data *my_ctx_data, context data + * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary. 
+ */ +asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey); + +asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +static void aesni_gcm_enc_avx2(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len) +{ +	if (plaintext_len < AVX_GEN2_OPTSIZE) { +		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, +				aad_len, auth_tag, auth_tag_len); +	} else if (plaintext_len < AVX_GEN4_OPTSIZE) { +		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); +		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} else { +		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); +		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} +} + +static void aesni_gcm_dec_avx2(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len) +{ +	if (ciphertext_len < AVX_GEN2_OPTSIZE) { +		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, +				aad, aad_len, auth_tag, auth_tag_len); +	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) { +		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); +		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} else { +		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); +		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad, +					aad_len, auth_tag, auth_tag_len); +	} +} +#endif + +static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out, +			const u8 *in, unsigned long plaintext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out, +			const u8 *in, unsigned long ciphertext_len, u8 *iv, +			u8 *hash_subkey, const u8 *aad, unsigned long aad_len, +			u8 *auth_tag, unsigned long auth_tag_len); + +static inline struct +aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) +{ +	return +		(struct aesni_rfc4106_gcm_ctx *) +		PTR_ALIGN((u8 *) +		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN); +} +#endif +  static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)  {  	unsigned long addr = (unsigned long)raw_ctx; @@ -128,27 +346,6 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)  	}  } -static struct crypto_alg aesni_alg = { -	.cra_name		= "aes", -	.cra_driver_name	= "aes-aesni", -	.cra_priority		= 300, -	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER, -	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, -	.cra_alignmask		= 0, -	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list), -	.cra_u	= { -		.cipher	= { -			.cia_min_keysize	= AES_MIN_KEY_SIZE, -			.cia_max_keysize	= AES_MAX_KEY_SIZE, -			.cia_setkey		= aes_set_key, -			.cia_encrypt		= aes_encrypt, -			.cia_decrypt		= aes_decrypt -		} -	} -}; -  static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)  {  	struct 
crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); @@ -163,27 +360,6 @@ static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)  	aesni_dec(ctx, dst, src);  } -static struct crypto_alg __aesni_alg = { -	.cra_name		= "__aes-aesni", -	.cra_driver_name	= "__driver-aes-aesni", -	.cra_priority		= 0, -	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER, -	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, -	.cra_alignmask		= 0, -	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list), -	.cra_u	= { -		.cipher	= { -			.cia_min_keysize	= AES_MIN_KEY_SIZE, -			.cia_max_keysize	= AES_MAX_KEY_SIZE, -			.cia_setkey		= aes_set_key, -			.cia_encrypt		= __aes_encrypt, -			.cia_decrypt		= __aes_decrypt -		} -	} -}; -  static int ecb_encrypt(struct blkcipher_desc *desc,  		       struct scatterlist *dst, struct scatterlist *src,  		       unsigned int nbytes) @@ -232,28 +408,6 @@ static int ecb_decrypt(struct blkcipher_desc *desc,  	return err;  } -static struct crypto_alg blk_ecb_alg = { -	.cra_name		= "__ecb-aes-aesni", -	.cra_driver_name	= "__driver-ecb-aes-aesni", -	.cra_priority		= 0, -	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, -	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, -	.cra_alignmask		= 0, -	.cra_type		= &crypto_blkcipher_type, -	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list), -	.cra_u = { -		.blkcipher = { -			.min_keysize	= AES_MIN_KEY_SIZE, -			.max_keysize	= AES_MAX_KEY_SIZE, -			.setkey		= aes_set_key, -			.encrypt	= ecb_encrypt, -			.decrypt	= ecb_decrypt, -		}, -	}, -}; -  static int cbc_encrypt(struct blkcipher_desc *desc,  		       struct scatterlist *dst, struct scatterlist *src,  		       unsigned int nbytes) @@ -302,28 +456,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,  	return err;  } -static struct crypto_alg blk_cbc_alg = { -	.cra_name		= "__cbc-aes-aesni", -	.cra_driver_name	= "__driver-cbc-aes-aesni", -	.cra_priority		= 0, -	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, -	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, -	.cra_alignmask		= 0, -	.cra_type		= &crypto_blkcipher_type, -	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list), -	.cra_u = { -		.blkcipher = { -			.min_keysize	= AES_MIN_KEY_SIZE, -			.max_keysize	= AES_MAX_KEY_SIZE, -			.setkey		= aes_set_key, -			.encrypt	= cbc_encrypt, -			.decrypt	= cbc_decrypt, -		}, -	}, -}; - +#ifdef CONFIG_X86_64  static void ctr_crypt_final(struct crypto_aes_ctx *ctx,  			    struct blkcipher_walk *walk)  { @@ -366,127 +499,736 @@ static int ctr_crypt(struct blkcipher_desc *desc,  	return err;  } +#endif -static struct crypto_alg blk_ctr_alg = { -	.cra_name		= "__ctr-aes-aesni", -	.cra_driver_name	= "__driver-ctr-aes-aesni", -	.cra_priority		= 0, -	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, -	.cra_blocksize		= 1, -	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, -	.cra_alignmask		= 0, -	.cra_type		= &crypto_blkcipher_type, -	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list), -	.cra_u = { -		.blkcipher = { -			.min_keysize	= AES_MIN_KEY_SIZE, -			.max_keysize	= AES_MAX_KEY_SIZE, -			.ivsize		= AES_BLOCK_SIZE, -			.setkey		= aes_set_key, -			.encrypt	= ctr_crypt, -			.decrypt	= ctr_crypt, -		}, -	}, -}; +static int ablk_ecb_init(struct crypto_tfm *tfm) +{ +	return ablk_init_common(tfm, "__driver-ecb-aes-aesni"); +} -static int ablk_set_key(struct 
crypto_ablkcipher *tfm, const u8 *key, -			unsigned int key_len) +static int ablk_cbc_init(struct crypto_tfm *tfm)  { -	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); -	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; +	return ablk_init_common(tfm, "__driver-cbc-aes-aesni"); +} + +#ifdef CONFIG_X86_64 +static int ablk_ctr_init(struct crypto_tfm *tfm) +{ +	return ablk_init_common(tfm, "__driver-ctr-aes-aesni"); +} + +#endif + +#ifdef HAS_PCBC +static int ablk_pcbc_init(struct crypto_tfm *tfm) +{ +	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))"); +} +#endif + +static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) +{ +	aesni_ecb_enc(ctx, blks, blks, nbytes); +} + +static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) +{ +	aesni_ecb_dec(ctx, blks, blks, nbytes); +} + +static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, +			    unsigned int keylen) +{ +	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);  	int err; -	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); -	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) -				    & CRYPTO_TFM_REQ_MASK); -	err = crypto_ablkcipher_setkey(child, key, key_len); -	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) -				    & CRYPTO_TFM_RES_MASK); -	return err; +	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key, +				 keylen - AES_BLOCK_SIZE); +	if (err) +		return err; + +	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE); +} + +static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm) +{ +	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); + +	lrw_free_table(&ctx->lrw_table); +} + +static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); +	be128 buf[8]; +	struct lrw_crypt_req req = { +		.tbuf = buf, +		.tbuflen = sizeof(buf), + +		.table_ctx = &ctx->lrw_table, +		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx), +		.crypt_fn = lrw_xts_encrypt_callback, +	}; +	int ret; + +	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +	kernel_fpu_begin(); +	ret = lrw_crypt(desc, dst, src, nbytes, &req); +	kernel_fpu_end(); + +	return ret; +} + +static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); +	be128 buf[8]; +	struct lrw_crypt_req req = { +		.tbuf = buf, +		.tbuflen = sizeof(buf), + +		.table_ctx = &ctx->lrw_table, +		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx), +		.crypt_fn = lrw_xts_decrypt_callback, +	}; +	int ret; + +	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +	kernel_fpu_begin(); +	ret = lrw_crypt(desc, dst, src, nbytes, &req); +	kernel_fpu_end(); + +	return ret;  } -static int ablk_encrypt(struct ablkcipher_request *req) +static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, +			    unsigned int keylen)  { -	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); -	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); +	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm); +	u32 *flags = &tfm->crt_flags; +	int err; + +	/* key consists of keys of equal size concatenated, therefore +	 * the length must be even +	 */ +	if (keylen % 2) { +		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; +		return -EINVAL; +	} + +	/* first half of xts-key is for crypt */ +	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2); +	if (err) +		
return err; + +	/* second half of xts-key is for tweak */ +	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2, +				  keylen / 2); +} + + +static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) +{ +	aesni_enc(ctx, out, in); +} + +#ifdef CONFIG_X86_64 + +static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +{ +	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); +} + +static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +{ +	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); +} + +static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) +{ +	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); +} + +static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) +{ +	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); +} + +static const struct common_glue_ctx aesni_enc_xts = { +	.num_funcs = 2, +	.fpu_blocks_limit = 1, + +	.funcs = { { +		.num_blocks = 8, +		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } +	}, { +		.num_blocks = 1, +		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } +	} } +}; + +static const struct common_glue_ctx aesni_dec_xts = { +	.num_funcs = 2, +	.fpu_blocks_limit = 1, + +	.funcs = { { +		.num_blocks = 8, +		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } +	}, { +		.num_blocks = 1, +		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) } +	} } +}; + +static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + +	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes, +				     XTS_TWEAK_CAST(aesni_xts_tweak), +				     aes_ctx(ctx->raw_tweak_ctx), +				     aes_ctx(ctx->raw_crypt_ctx)); +} + +static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + +	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes, +				     XTS_TWEAK_CAST(aesni_xts_tweak), +				     aes_ctx(ctx->raw_tweak_ctx), +				     aes_ctx(ctx->raw_crypt_ctx)); +} + +#else + +static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); +	be128 buf[8]; +	struct xts_crypt_req req = { +		.tbuf = buf, +		.tbuflen = sizeof(buf), + +		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), +		.tweak_fn = aesni_xts_tweak, +		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), +		.crypt_fn = lrw_xts_encrypt_callback, +	}; +	int ret; + +	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +	kernel_fpu_begin(); +	ret = xts_crypt(desc, dst, src, nbytes, &req); +	kernel_fpu_end(); + +	return ret; +} + +static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, +		       struct scatterlist *src, unsigned int nbytes) +{ +	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); +	be128 buf[8]; +	struct xts_crypt_req req = { +		.tbuf = buf, +		.tbuflen = sizeof(buf), + +		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), +		.tweak_fn = aesni_xts_tweak, +		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), +		.crypt_fn = lrw_xts_decrypt_callback, +	}; +	int ret; + +	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +	kernel_fpu_begin(); +	ret = xts_crypt(desc, dst, src, nbytes, &req); +	kernel_fpu_end(); + +	return ret; +} + 
+#endif + +#ifdef CONFIG_X86_64 +static int rfc4106_init(struct crypto_tfm *tfm) +{ +	struct cryptd_aead *cryptd_tfm; +	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) +		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); +	struct crypto_aead *cryptd_child; +	struct aesni_rfc4106_gcm_ctx *child_ctx; +	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); +	if (IS_ERR(cryptd_tfm)) +		return PTR_ERR(cryptd_tfm); + +	cryptd_child = cryptd_aead_child(cryptd_tfm); +	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); +	memcpy(child_ctx, ctx, sizeof(*ctx)); +	ctx->cryptd_tfm = cryptd_tfm; +	tfm->crt_aead.reqsize = sizeof(struct aead_request) +		+ crypto_aead_reqsize(&cryptd_tfm->base); +	return 0; +} + +static void rfc4106_exit(struct crypto_tfm *tfm) +{ +	struct aesni_rfc4106_gcm_ctx *ctx = +		(struct aesni_rfc4106_gcm_ctx *) +		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); +	if (!IS_ERR(ctx->cryptd_tfm)) +		cryptd_free_aead(ctx->cryptd_tfm); +	return; +} + +static void +rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) +{ +	struct aesni_gcm_set_hash_subkey_result *result = req->data; + +	if (err == -EINPROGRESS) +		return; +	result->err = err; +	complete(&result->completion); +} + +static int +rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) +{ +	struct crypto_ablkcipher *ctr_tfm; +	struct ablkcipher_request *req; +	int ret = -EINVAL; +	struct aesni_hash_subkey_req_data *req_data; + +	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); +	if (IS_ERR(ctr_tfm)) +		return PTR_ERR(ctr_tfm); + +	crypto_ablkcipher_clear_flags(ctr_tfm, ~0); + +	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); +	if (ret) +		goto out_free_ablkcipher; + +	ret = -ENOMEM; +	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); +	if (!req) +		goto out_free_ablkcipher; + +	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); +	if (!req_data) +		goto out_free_request; + +	memset(req_data->iv, 0, sizeof(req_data->iv)); + +	/* Clear the data in the hash sub key container to zero.*/ +	/* We want to cipher all zeros to create the hash sub key. 
*/ +	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); + +	init_completion(&req_data->result.completion); +	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); +	ablkcipher_request_set_tfm(req, ctr_tfm); +	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | +					CRYPTO_TFM_REQ_MAY_BACKLOG, +					rfc4106_set_hash_subkey_done, +					&req_data->result); + +	ablkcipher_request_set_crypt(req, &req_data->sg, +		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); + +	ret = crypto_ablkcipher_encrypt(req); +	if (ret == -EINPROGRESS || ret == -EBUSY) { +		ret = wait_for_completion_interruptible +			(&req_data->result.completion); +		if (!ret) +			ret = req_data->result.err; +	} +	kfree(req_data); +out_free_request: +	ablkcipher_request_free(req); +out_free_ablkcipher: +	crypto_free_ablkcipher(ctr_tfm); +	return ret; +} + +static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, +						   unsigned int key_len) +{ +	int ret = 0; +	struct crypto_tfm *tfm = crypto_aead_tfm(parent); +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); +	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); +	struct aesni_rfc4106_gcm_ctx *child_ctx = +                                 aesni_rfc4106_gcm_ctx_get(cryptd_child); +	u8 *new_key_align, *new_key_mem = NULL; + +	if (key_len < 4) { +		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); +		return -EINVAL; +	} +	/* Account for the 4 byte nonce at the end. */ +	key_len -= 4; +	if (key_len != AES_KEYSIZE_128) { +		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); +		return -EINVAL; +	} + +	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); +	/* This must be on a 16 byte boundary! */ +	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) +		return -EINVAL; + +	if ((unsigned long)key % AESNI_ALIGN) { +		/* key is not aligned: use an auxiliary aligned pointer */ +		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL); +		if (!new_key_mem) +			return -ENOMEM; + +		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN); +		memcpy(new_key_align, key, key_len); +		key = new_key_align; +	} + +	if (!irq_fpu_usable()) +		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), +		key, key_len); +	else { +		kernel_fpu_begin(); +		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); +		kernel_fpu_end(); +	} +	/* This must be on a 16 byte boundary! */ +	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { +		ret = -EINVAL; +		goto exit; +	} +	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); +	memcpy(child_ctx, ctx, sizeof(*ctx)); +exit: +	kfree(new_key_mem); +	return ret; +} + +/* This is the Integrity Check Value (aka the authentication tag) and can + * be 8, 12 or 16 bytes long. 
*/ +static int rfc4106_set_authsize(struct crypto_aead *parent, +				unsigned int authsize) +{ +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); +	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + +	switch (authsize) { +	case 8: +	case 12: +	case 16: +		break; +	default: +		return -EINVAL; +	} +	crypto_aead_crt(parent)->authsize = authsize; +	crypto_aead_crt(cryptd_child)->authsize = authsize; +	return 0; +} + +static int rfc4106_encrypt(struct aead_request *req) +{ +	int ret; +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);  	if (!irq_fpu_usable()) { -		struct ablkcipher_request *cryptd_req = -			ablkcipher_request_ctx(req); +		struct aead_request *cryptd_req = +			(struct aead_request *) aead_request_ctx(req);  		memcpy(cryptd_req, req, sizeof(*req)); -		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); -		return crypto_ablkcipher_encrypt(cryptd_req); +		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); +		return crypto_aead_encrypt(cryptd_req);  	} else { -		struct blkcipher_desc desc; -		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); -		desc.info = req->info; -		desc.flags = 0; -		return crypto_blkcipher_crt(desc.tfm)->encrypt( -			&desc, req->dst, req->src, req->nbytes); +		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); +		kernel_fpu_begin(); +		ret = cryptd_child->base.crt_aead.encrypt(req); +		kernel_fpu_end(); +		return ret;  	}  } -static int ablk_decrypt(struct ablkcipher_request *req) +static int rfc4106_decrypt(struct aead_request *req)  { -	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); -	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); +	int ret; +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);  	if (!irq_fpu_usable()) { -		struct ablkcipher_request *cryptd_req = -			ablkcipher_request_ctx(req); +		struct aead_request *cryptd_req = +			(struct aead_request *) aead_request_ctx(req);  		memcpy(cryptd_req, req, sizeof(*req)); -		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); -		return crypto_ablkcipher_decrypt(cryptd_req); +		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); +		return crypto_aead_decrypt(cryptd_req);  	} else { -		struct blkcipher_desc desc; -		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); -		desc.info = req->info; -		desc.flags = 0; -		return crypto_blkcipher_crt(desc.tfm)->decrypt( -			&desc, req->dst, req->src, req->nbytes); +		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); +		kernel_fpu_begin(); +		ret = cryptd_child->base.crt_aead.decrypt(req); +		kernel_fpu_end(); +		return ret;  	}  } -static void ablk_exit(struct crypto_tfm *tfm) +static int __driver_rfc4106_encrypt(struct aead_request *req)  { -	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); +	u8 one_entry_in_sg = 0; +	u8 *src, *dst, *assoc; +	__be32 counter = cpu_to_be32(1); +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); +	void *aes_ctx = &(ctx->aes_key_expanded); +	unsigned long auth_tag_len = crypto_aead_authsize(tfm); +	u8 iv_tab[16+AESNI_ALIGN]; +	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN); +	struct scatter_walk src_sg_walk; +	struct scatter_walk assoc_sg_walk; +	struct scatter_walk dst_sg_walk; +	unsigned int i; + +	/* Assuming we are supporting rfc4106 64-bit extended */ +	/* sequence 
numbers, we need to have the AAD length equal */ +	/* to 8 or 12 bytes */ +	if (unlikely(req->assoclen != 8 && req->assoclen != 12)) +		return -EINVAL; +	/* Build the IV */ +	for (i = 0; i < 4; i++) +		*(iv+i) = ctx->nonce[i]; +	for (i = 0; i < 8; i++) +		*(iv+4+i) = req->iv[i]; +	*((__be32 *)(iv+12)) = counter; + +	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { +		one_entry_in_sg = 1; +		scatterwalk_start(&src_sg_walk, req->src); +		scatterwalk_start(&assoc_sg_walk, req->assoc); +		src = scatterwalk_map(&src_sg_walk); +		assoc = scatterwalk_map(&assoc_sg_walk); +		dst = src; +		if (unlikely(req->src != req->dst)) { +			scatterwalk_start(&dst_sg_walk, req->dst); +			dst = scatterwalk_map(&dst_sg_walk); +		} -	cryptd_free_ablkcipher(ctx->cryptd_tfm); -} +	} else { +		/* Allocate memory for src, dst, assoc */ +		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, +			GFP_ATOMIC); +		if (unlikely(!src)) +			return -ENOMEM; +		assoc = (src + req->cryptlen + auth_tag_len); +		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); +		scatterwalk_map_and_copy(assoc, req->assoc, 0, +					req->assoclen, 0); +		dst = src; +	} -static void ablk_init_common(struct crypto_tfm *tfm, -			     struct cryptd_ablkcipher *cryptd_tfm) -{ -	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); +	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, +		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst +		+ ((unsigned long)req->cryptlen), auth_tag_len); -	ctx->cryptd_tfm = cryptd_tfm; -	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + -		crypto_ablkcipher_reqsize(&cryptd_tfm->base); +	/* The authTag (aka the Integrity Check Value) needs to be written +	 * back to the packet. */ +	if (one_entry_in_sg) { +		if (unlikely(req->src != req->dst)) { +			scatterwalk_unmap(dst); +			scatterwalk_done(&dst_sg_walk, 0, 0); +		} +		scatterwalk_unmap(src); +		scatterwalk_unmap(assoc); +		scatterwalk_done(&src_sg_walk, 0, 0); +		scatterwalk_done(&assoc_sg_walk, 0, 0); +	} else { +		scatterwalk_map_and_copy(dst, req->dst, 0, +			req->cryptlen + auth_tag_len, 1); +		kfree(src); +	} +	return 0;  } -static int ablk_ecb_init(struct crypto_tfm *tfm) +static int __driver_rfc4106_decrypt(struct aead_request *req)  { -	struct cryptd_ablkcipher *cryptd_tfm; +	u8 one_entry_in_sg = 0; +	u8 *src, *dst, *assoc; +	unsigned long tempCipherLen = 0; +	__be32 counter = cpu_to_be32(1); +	int retval = 0; +	struct crypto_aead *tfm = crypto_aead_reqtfm(req); +	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); +	void *aes_ctx = &(ctx->aes_key_expanded); +	unsigned long auth_tag_len = crypto_aead_authsize(tfm); +	u8 iv_and_authTag[32+AESNI_ALIGN]; +	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN); +	u8 *authTag = iv + 16; +	struct scatter_walk src_sg_walk; +	struct scatter_walk assoc_sg_walk; +	struct scatter_walk dst_sg_walk; +	unsigned int i; + +	if (unlikely((req->cryptlen < auth_tag_len) || +		(req->assoclen != 8 && req->assoclen != 12))) +		return -EINVAL; +	/* Assuming we are supporting rfc4106 64-bit extended */ +	/* sequence numbers, we need to have the AAD length */ +	/* equal to 8 or 12 bytes */ + +	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); +	/* Build the IV */ +	for (i = 0; i < 4; i++) +		*(iv+i) = ctx->nonce[i]; +	for (i = 0; i < 8; i++) +		*(iv+4+i) = req->iv[i]; +	*((__be32 *)(iv+12)) = counter; + +	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { +		one_entry_in_sg = 1; +		scatterwalk_start(&src_sg_walk, 
req->src); +		scatterwalk_start(&assoc_sg_walk, req->assoc); +		src = scatterwalk_map(&src_sg_walk); +		assoc = scatterwalk_map(&assoc_sg_walk); +		dst = src; +		if (unlikely(req->src != req->dst)) { +			scatterwalk_start(&dst_sg_walk, req->dst); +			dst = scatterwalk_map(&dst_sg_walk); +		} -	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; +	} else { +		/* Allocate memory for src, dst, assoc */ +		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); +		if (!src) +			return -ENOMEM; +		assoc = (src + req->cryptlen + auth_tag_len); +		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); +		scatterwalk_map_and_copy(assoc, req->assoc, 0, +			req->assoclen, 0); +		dst = src; +	} + +	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, +		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, +		authTag, auth_tag_len); + +	/* Compare generated tag with passed in tag. */ +	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ? +		-EBADMSG : 0; + +	if (one_entry_in_sg) { +		if (unlikely(req->src != req->dst)) { +			scatterwalk_unmap(dst); +			scatterwalk_done(&dst_sg_walk, 0, 0); +		} +		scatterwalk_unmap(src); +		scatterwalk_unmap(assoc); +		scatterwalk_done(&src_sg_walk, 0, 0); +		scatterwalk_done(&assoc_sg_walk, 0, 0); +	} else { +		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); +		kfree(src); +	} +	return retval;  } +#endif -static struct crypto_alg ablk_ecb_alg = { +static struct crypto_alg aesni_algs[] = { { +	.cra_name		= "aes", +	.cra_driver_name	= "aes-aesni", +	.cra_priority		= 300, +	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) + +				  AESNI_ALIGN - 1, +	.cra_alignmask		= 0, +	.cra_module		= THIS_MODULE, +	.cra_u	= { +		.cipher	= { +			.cia_min_keysize	= AES_MIN_KEY_SIZE, +			.cia_max_keysize	= AES_MAX_KEY_SIZE, +			.cia_setkey		= aes_set_key, +			.cia_encrypt		= aes_encrypt, +			.cia_decrypt		= aes_decrypt +		} +	} +}, { +	.cra_name		= "__aes-aesni", +	.cra_driver_name	= "__driver-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) + +				  AESNI_ALIGN - 1, +	.cra_alignmask		= 0, +	.cra_module		= THIS_MODULE, +	.cra_u	= { +		.cipher	= { +			.cia_min_keysize	= AES_MIN_KEY_SIZE, +			.cia_max_keysize	= AES_MAX_KEY_SIZE, +			.cia_setkey		= aes_set_key, +			.cia_encrypt		= __aes_encrypt, +			.cia_decrypt		= __aes_decrypt +		} +	} +}, { +	.cra_name		= "__ecb-aes-aesni", +	.cra_driver_name	= "__driver-ecb-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) + +				  AESNI_ALIGN - 1, +	.cra_alignmask		= 0, +	.cra_type		= &crypto_blkcipher_type, +	.cra_module		= THIS_MODULE, +	.cra_u = { +		.blkcipher = { +			.min_keysize	= AES_MIN_KEY_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE, +			.setkey		= aes_set_key, +			.encrypt	= ecb_encrypt, +			.decrypt	= ecb_decrypt, +		}, +	}, +}, { +	.cra_name		= "__cbc-aes-aesni", +	.cra_driver_name	= "__driver-cbc-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) + +				  AESNI_ALIGN - 1, +	.cra_alignmask		= 0, +	.cra_type		= &crypto_blkcipher_type, +	.cra_module		= THIS_MODULE, +	.cra_u = { +		
.blkcipher = { +			.min_keysize	= AES_MIN_KEY_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE, +			.setkey		= aes_set_key, +			.encrypt	= cbc_encrypt, +			.decrypt	= cbc_decrypt, +		}, +	}, +}, {  	.cra_name		= "ecb(aes)",  	.cra_driver_name	= "ecb-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),  	.cra_init		= ablk_ecb_init,  	.cra_exit		= ablk_exit,  	.cra_u = { @@ -498,30 +1240,16 @@ static struct crypto_alg ablk_ecb_alg = {  			.decrypt	= ablk_decrypt,  		},  	}, -}; - -static int ablk_cbc_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher *cryptd_tfm; - -	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_cbc_alg = { +}, {  	.cra_name		= "cbc(aes)",  	.cra_driver_name	= "cbc-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),  	.cra_init		= ablk_cbc_init,  	.cra_exit		= ablk_exit,  	.cra_u = { @@ -534,30 +1262,38 @@ static struct crypto_alg ablk_cbc_alg = {  			.decrypt	= ablk_decrypt,  		},  	}, -}; - -static int ablk_ctr_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher *cryptd_tfm; - -	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_ctr_alg = { +#ifdef CONFIG_X86_64 +}, { +	.cra_name		= "__ctr-aes-aesni", +	.cra_driver_name	= "__driver-ctr-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, +	.cra_blocksize		= 1, +	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) + +				  AESNI_ALIGN - 1, +	.cra_alignmask		= 0, +	.cra_type		= &crypto_blkcipher_type, +	.cra_module		= THIS_MODULE, +	.cra_u = { +		.blkcipher = { +			.min_keysize	= AES_MIN_KEY_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE, +			.ivsize		= AES_BLOCK_SIZE, +			.setkey		= aes_set_key, +			.encrypt	= ctr_crypt, +			.decrypt	= ctr_crypt, +		}, +	}, +}, {  	.cra_name		= "ctr(aes)",  	.cra_driver_name	= "ctr-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= 1, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),  	.cra_init		= ablk_ctr_init,  	.cra_exit		= ablk_exit,  	.cra_u = { @@ -571,151 +1307,146 @@ static struct crypto_alg ablk_ctr_alg = {  			.geniv		= "chainiv",  		},  	}, -}; - -#ifdef HAS_CTR -static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher 
*cryptd_tfm; - -	cryptd_tfm = cryptd_alloc_ablkcipher( -		"rfc3686(__driver-ctr-aes-aesni)", 0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_rfc3686_ctr_alg = { -	.cra_name		= "rfc3686(ctr(aes))", -	.cra_driver_name	= "rfc3686-ctr-aes-aesni", +}, { +	.cra_name		= "__gcm-aes-aesni", +	.cra_driver_name	= "__driver-gcm-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_AEAD, +	.cra_blocksize		= 1, +	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) + +				  AESNI_ALIGN, +	.cra_alignmask		= 0, +	.cra_type		= &crypto_aead_type, +	.cra_module		= THIS_MODULE, +	.cra_u = { +		.aead = { +			.encrypt	= __driver_rfc4106_encrypt, +			.decrypt	= __driver_rfc4106_decrypt, +		}, +	}, +}, { +	.cra_name		= "rfc4106(gcm(aes))", +	.cra_driver_name	= "rfc4106-gcm-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= 1, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) + +				  AESNI_ALIGN,  	.cra_alignmask		= 0, -	.cra_type		= &crypto_ablkcipher_type, +	.cra_type		= &crypto_nivaead_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list), -	.cra_init		= ablk_rfc3686_ctr_init, -	.cra_exit		= ablk_exit, +	.cra_init		= rfc4106_init, +	.cra_exit		= rfc4106_exit,  	.cra_u = { -		.ablkcipher = { -			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE, -			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE, -			.ivsize	     = CTR_RFC3686_IV_SIZE, -			.setkey	     = ablk_set_key, -			.encrypt     = ablk_encrypt, -			.decrypt     = ablk_decrypt, -			.geniv	     = "seqiv", +		.aead = { +			.setkey		= rfc4106_set_key, +			.setauthsize	= rfc4106_set_authsize, +			.encrypt	= rfc4106_encrypt, +			.decrypt	= rfc4106_decrypt, +			.geniv		= "seqiv", +			.ivsize		= 8, +			.maxauthsize	= 16,  		},  	}, -};  #endif - -#ifdef HAS_LRW -static int ablk_lrw_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher *cryptd_tfm; - -	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))", -					     0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_lrw_alg = { -	.cra_name		= "lrw(aes)", -	.cra_driver_name	= "lrw-aes-aesni", +#ifdef HAS_PCBC +}, { +	.cra_name		= "pcbc(aes)", +	.cra_driver_name	= "pcbc-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list), -	.cra_init		= ablk_lrw_init, +	.cra_init		= ablk_pcbc_init,  	.cra_exit		= ablk_exit,  	.cra_u = {  		.ablkcipher = { -			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, -			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, +			.min_keysize	= AES_MIN_KEY_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE,  			.ivsize		= AES_BLOCK_SIZE,  			.setkey		= ablk_set_key,  			.encrypt	= ablk_encrypt,  			.decrypt	= ablk_decrypt,  		},  	}, -};  #endif - -#ifdef HAS_PCBC -static int ablk_pcbc_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher *cryptd_tfm; - -	cryptd_tfm = 
cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))", -					     0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_pcbc_alg = { -	.cra_name		= "pcbc(aes)", -	.cra_driver_name	= "pcbc-aes-aesni", +}, { +	.cra_name		= "__lrw-aes-aesni", +	.cra_driver_name	= "__driver-lrw-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx), +	.cra_alignmask		= 0, +	.cra_type		= &crypto_blkcipher_type, +	.cra_module		= THIS_MODULE, +	.cra_exit		= lrw_aesni_exit_tfm, +	.cra_u = { +		.blkcipher = { +			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, +			.ivsize		= AES_BLOCK_SIZE, +			.setkey		= lrw_aesni_setkey, +			.encrypt	= lrw_encrypt, +			.decrypt	= lrw_decrypt, +		}, +	}, +}, { +	.cra_name		= "__xts-aes-aesni", +	.cra_driver_name	= "__driver-xts-aes-aesni", +	.cra_priority		= 0, +	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER, +	.cra_blocksize		= AES_BLOCK_SIZE, +	.cra_ctxsize		= sizeof(struct aesni_xts_ctx), +	.cra_alignmask		= 0, +	.cra_type		= &crypto_blkcipher_type, +	.cra_module		= THIS_MODULE, +	.cra_u = { +		.blkcipher = { +			.min_keysize	= 2 * AES_MIN_KEY_SIZE, +			.max_keysize	= 2 * AES_MAX_KEY_SIZE, +			.ivsize		= AES_BLOCK_SIZE, +			.setkey		= xts_aesni_setkey, +			.encrypt	= xts_encrypt, +			.decrypt	= xts_decrypt, +		}, +	}, +}, { +	.cra_name		= "lrw(aes)", +	.cra_driver_name	= "lrw-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list), -	.cra_init		= ablk_pcbc_init, +	.cra_init		= ablk_init,  	.cra_exit		= ablk_exit,  	.cra_u = {  		.ablkcipher = { -			.min_keysize	= AES_MIN_KEY_SIZE, -			.max_keysize	= AES_MAX_KEY_SIZE, +			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, +			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,  			.ivsize		= AES_BLOCK_SIZE,  			.setkey		= ablk_set_key,  			.encrypt	= ablk_encrypt,  			.decrypt	= ablk_decrypt,  		},  	}, -}; -#endif - -#ifdef HAS_XTS -static int ablk_xts_init(struct crypto_tfm *tfm) -{ -	struct cryptd_ablkcipher *cryptd_tfm; - -	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))", -					     0, 0); -	if (IS_ERR(cryptd_tfm)) -		return PTR_ERR(cryptd_tfm); -	ablk_init_common(tfm, cryptd_tfm); -	return 0; -} - -static struct crypto_alg ablk_xts_alg = { +}, {  	.cra_name		= "xts(aes)",  	.cra_driver_name	= "xts-aes-aesni",  	.cra_priority		= 400, -	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, +	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,  	.cra_blocksize		= AES_BLOCK_SIZE, -	.cra_ctxsize		= sizeof(struct async_aes_ctx), +	.cra_ctxsize		= sizeof(struct async_helper_ctx),  	.cra_alignmask		= 0,  	.cra_type		= &crypto_ablkcipher_type,  	.cra_module		= THIS_MODULE, -	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list), -	.cra_init		= ablk_xts_init, +	.cra_init		= ablk_init,  	.cra_exit		= ablk_exit,  	.cra_u = {  		.ablkcipher = { @@ -727,108 +1458,55 @@ static struct crypto_alg ablk_xts_alg = {  			.decrypt	= ablk_decrypt,  		},  	}, +} }; + + +static const struct 
x86_cpu_id aesni_cpu_id[] = { +	X86_FEATURE_MATCH(X86_FEATURE_AES), +	{}  }; -#endif +MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);  static int __init aesni_init(void)  {  	int err; -	if (!cpu_has_aes) { -		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); +	if (!x86_match_cpu(aesni_cpu_id))  		return -ENODEV; -	} -	if ((err = crypto_register_alg(&aesni_alg))) -		goto aes_err; -	if ((err = crypto_register_alg(&__aesni_alg))) -		goto __aes_err; -	if ((err = crypto_register_alg(&blk_ecb_alg))) -		goto blk_ecb_err; -	if ((err = crypto_register_alg(&blk_cbc_alg))) -		goto blk_cbc_err; -	if ((err = crypto_register_alg(&blk_ctr_alg))) -		goto blk_ctr_err; -	if ((err = crypto_register_alg(&ablk_ecb_alg))) -		goto ablk_ecb_err; -	if ((err = crypto_register_alg(&ablk_cbc_alg))) -		goto ablk_cbc_err; -	if ((err = crypto_register_alg(&ablk_ctr_alg))) -		goto ablk_ctr_err; -#ifdef HAS_CTR -	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg))) -		goto ablk_rfc3686_ctr_err; -#endif -#ifdef HAS_LRW -	if ((err = crypto_register_alg(&ablk_lrw_alg))) -		goto ablk_lrw_err; +#ifdef CONFIG_X86_64 +#ifdef CONFIG_AS_AVX2 +	if (boot_cpu_has(X86_FEATURE_AVX2)) { +		pr_info("AVX2 version of gcm_enc/dec engaged.\n"); +		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2; +		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2; +	} else  #endif -#ifdef HAS_PCBC -	if ((err = crypto_register_alg(&ablk_pcbc_alg))) -		goto ablk_pcbc_err; +#ifdef CONFIG_AS_AVX +	if (boot_cpu_has(X86_FEATURE_AVX)) { +		pr_info("AVX version of gcm_enc/dec engaged.\n"); +		aesni_gcm_enc_tfm = aesni_gcm_enc_avx; +		aesni_gcm_dec_tfm = aesni_gcm_dec_avx; +	} else  #endif -#ifdef HAS_XTS -	if ((err = crypto_register_alg(&ablk_xts_alg))) -		goto ablk_xts_err; +	{ +		pr_info("SSE version of gcm_enc/dec engaged.\n"); +		aesni_gcm_enc_tfm = aesni_gcm_enc; +		aesni_gcm_dec_tfm = aesni_gcm_dec; +	}  #endif -	return err; +	err = crypto_fpu_init(); +	if (err) +		return err; -#ifdef HAS_XTS -ablk_xts_err: -#endif -#ifdef HAS_PCBC -	crypto_unregister_alg(&ablk_pcbc_alg); -ablk_pcbc_err: -#endif -#ifdef HAS_LRW -	crypto_unregister_alg(&ablk_lrw_alg); -ablk_lrw_err: -#endif -#ifdef HAS_CTR -	crypto_unregister_alg(&ablk_rfc3686_ctr_alg); -ablk_rfc3686_ctr_err: -#endif -	crypto_unregister_alg(&ablk_ctr_alg); -ablk_ctr_err: -	crypto_unregister_alg(&ablk_cbc_alg); -ablk_cbc_err: -	crypto_unregister_alg(&ablk_ecb_alg); -ablk_ecb_err: -	crypto_unregister_alg(&blk_ctr_alg); -blk_ctr_err: -	crypto_unregister_alg(&blk_cbc_alg); -blk_cbc_err: -	crypto_unregister_alg(&blk_ecb_alg); -blk_ecb_err: -	crypto_unregister_alg(&__aesni_alg); -__aes_err: -	crypto_unregister_alg(&aesni_alg); -aes_err: -	return err; +	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));  }  static void __exit aesni_exit(void)  { -#ifdef HAS_XTS -	crypto_unregister_alg(&ablk_xts_alg); -#endif -#ifdef HAS_PCBC -	crypto_unregister_alg(&ablk_pcbc_alg); -#endif -#ifdef HAS_LRW -	crypto_unregister_alg(&ablk_lrw_alg); -#endif -#ifdef HAS_CTR -	crypto_unregister_alg(&ablk_rfc3686_ctr_alg); -#endif -	crypto_unregister_alg(&ablk_ctr_alg); -	crypto_unregister_alg(&ablk_cbc_alg); -	crypto_unregister_alg(&ablk_ecb_alg); -	crypto_unregister_alg(&blk_ctr_alg); -	crypto_unregister_alg(&blk_cbc_alg); -	crypto_unregister_alg(&blk_ecb_alg); -	crypto_unregister_alg(&__aesni_alg); -	crypto_unregister_alg(&aesni_alg); +	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); + +	crypto_fpu_exit();  }  module_init(aesni_init);
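
The patch leans on a handful of recurring idioms. The sketches below illustrate them as small, self-contained user-space C programs; every helper name in them is a stand-in unless it appears in the diff above.

First, the alignment trick behind aes_ctx() and aesni_rfc4106_gcm_ctx_get(): the context areas are sized sizeof(ctx) + AESNI_ALIGN - 1 and the pointer is rounded up with PTR_ALIGN(), so the assembler routines always see a 16-byte-aligned key schedule no matter where the raw allocation landed. A minimal sketch, with ptr_align() standing in for the kernel's PTR_ALIGN():

#include <stdint.h>
#include <stdio.h>

#define AESNI_ALIGN 16

/* ptr_align() mimics PTR_ALIGN(): round p up to the next multiple of
 * a, where a is a power of two. */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + (a - 1)) & ~(a - 1));
}

int main(void)
{
	/* Over-allocate by AESNI_ALIGN - 1, as the .cra_ctxsize fields in
	 * the diff do, then round up. */
	unsigned char raw_ctx[128 + AESNI_ALIGN - 1];
	void *ctx = ptr_align(raw_ctx, AESNI_ALIGN);

	printf("raw=%p aligned=%p offset=%zu\n", (void *)raw_ctx, ctx,
	       (size_t)((uintptr_t)ctx - (uintptr_t)raw_ctx));
	return 0;
}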
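rfc4106_encrypt(), rfc4106_decrypt() and the ablk helpers all share one shape: if irq_fpu_usable() says the FPU cannot be touched in the current context, the request is forwarded to a cryptd worker thread; otherwise the SIMD routine runs between kernel_fpu_begin() and kernel_fpu_end(). A sketch of that shape, with all kernel primitives replaced by hypothetical stubs:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for irq_fpu_usable(), kernel_fpu_begin()/end() and the
 * cryptd fallback; none of these are kernel symbols. */
static bool fpu_usable(void) { return true; }
static void fpu_begin(void) { }
static void fpu_end(void) { }
static int queue_to_worker(void) { puts("deferred to worker"); return 0; }
static int do_simd_crypt(void) { puts("SIMD fast path"); return 0; }

/* Take the SIMD fast path only when the FPU is usable in this
 * context; otherwise hand the request to an async worker that runs
 * with a usable FPU. */
static int crypt_dispatch(void)
{
	int ret;

	if (!fpu_usable())
		return queue_to_worker();

	fpu_begin();
	ret = do_simd_crypt();
	fpu_end();
	return ret;
}

int main(void) { return crypt_dispatch(); }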
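__driver_rfc4106_encrypt() and __driver_rfc4106_decrypt() both assemble the pre-counter block J0 the same way: the 4-byte salt kept from the tail of the key, then the 8-byte explicit IV from the request, then a big-endian block counter of 1. A sketch of that layout; rfc4106_build_iv() is a made-up helper, the patch open-codes the copies:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void rfc4106_build_iv(uint8_t iv[16], const uint8_t nonce[4],
			     const uint8_t explicit_iv[8])
{
	memcpy(iv, nonce, 4);           /* salt from the Security Association */
	memcpy(iv + 4, explicit_iv, 8); /* per-packet IV (ESP payload) */
	iv[12] = 0;                     /* 32-bit big-endian counter = 1 */
	iv[13] = 0;
	iv[14] = 0;
	iv[15] = 1;
}

int main(void)
{
	const uint8_t nonce[4] = { 0xca, 0xfe, 0xba, 0xbe };
	const uint8_t eiv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[16];

	rfc4106_build_iv(iv, nonce, eiv);
	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	putchar('\n');
	return 0;
}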
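rfc4106_set_hash_subkey() derives GCM's hash subkey by running ctr(aes) over 16 zero bytes with an all-zero counter block. Since CTR XORs the plaintext with E_K(counter), zero plaintext under a zero counter block yields exactly H = E_K(0^128). The same value computed directly with OpenSSL's legacy AES API, for illustration only (build with -lcrypto):

#include <openssl/aes.h>	/* legacy API, fine for a demo */
#include <stdio.h>

int main(void)
{
	static const unsigned char key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	unsigned char zero[16] = { 0 }, h[16];
	AES_KEY enc;

	if (AES_set_encrypt_key(key, 128, &enc) != 0)
		return 1;
	AES_encrypt(zero, h, &enc);	/* H = E_K(0^128) */

	for (int i = 0; i < 16; i++)
		printf("%02x", h[i]);
	putchar('\n');
	return 0;
}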
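The AVX and AVX2 wrappers pick an implementation per request by data length: below AVX_GEN2_OPTSIZE (640 bytes) the plain SSE routine wins because the gen2/gen4 precompute step does not pay off, between 640 and AVX_GEN4_OPTSIZE (4096) the gen2 path is taken, and above that the gen4 path. The same tiering reduced to a picker that just names the branch taken (thresholds copied from the patch):

#include <stdio.h>

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

static const char *pick_gcm_path(unsigned long len)
{
	if (len < AVX_GEN2_OPTSIZE)
		return "sse (aesni_gcm_enc)";
	else if (len < AVX_GEN4_OPTSIZE)
		return "avx gen2 (aesni_gcm_enc_avx_gen2)";
	else
		return "avx2 gen4 (aesni_gcm_enc_avx_gen4)";
}

int main(void)
{
	const unsigned long sizes[] = { 64, 640, 1500, 4096, 65536 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6lu bytes -> %s\n", sizes[i], pick_gcm_path(sizes[i]));
	return 0;
}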
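Finally, xts_aesni_setkey(): an XTS key is two equal-size AES keys concatenated, data key first and tweak key second, which is why the keylen % 2 check rejects odd lengths before the two halves are expanded separately. A sketch of the split with illustrative names:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct xts_keys {
	const unsigned char *crypt;	/* first half: data encryption key */
	const unsigned char *tweak;	/* second half: tweak encryption key */
	size_t half;
};

/* Mirrors the validity check in xts_aesni_setkey(). */
static int xts_split_key(const unsigned char *key, size_t keylen,
			 struct xts_keys *out)
{
	if (keylen % 2)			/* must split into two equal keys */
		return -EINVAL;
	out->half = keylen / 2;
	out->crypt = key;
	out->tweak = key + out->half;
	return 0;
}

int main(void)
{
	unsigned char key[32] = { 0 };	/* xts(aes) with two 128-bit halves */
	struct xts_keys k;

	if (xts_split_key(key, sizeof(key), &k))
		return 1;
	printf("crypt key at +0, tweak key at +%zu\n", k.half);
	return 0;
}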