Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  264
1 file changed, 224 insertions(+), 40 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1b9c22bea8a..948ad0e7774 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -34,14 +34,13 @@
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-
-#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
-#define HAS_CTR
+#ifdef CONFIG_X86_64
+#include <asm/crypto/glue_helper.h>
#endif
#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
@@ -102,10 +101,16 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
+#define AVX_GEN2_OPTSIZE 640
+#define AVX_GEN4_OPTSIZE 4096
+
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, bool enc, u8 *iv);
+
/* asmlinkage void aesni_gcm_enc()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
* u8 *out, Ciphertext output. Encrypt in-place is allowed.
@@ -148,6 +153,123 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
+
+#ifdef CONFIG_AS_AVX
+/*
+ * asmlinkage void aesni_gcm_precomp_avx_gen2()
+ * gcm_data *my_ctx_data, context data
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ */
+asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
+
+asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void aesni_gcm_enc_avx(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+
+static void aesni_gcm_dec_avx(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+#endif
+
+#ifdef CONFIG_AS_AVX2
+/*
+ * asmlinkage void aesni_gcm_precomp_avx_gen4()
+ * gcm_data *my_ctx_data, context data
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ */
+asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
+
+asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+
+static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
+ aad, aad_len, auth_tag, auth_tag_len);
+ } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+#endif
+
+static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
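The wrappers added above pick an implementation by buffer length: below AVX_GEN2_OPTSIZE (640) bytes the plain SSE aesni_gcm_enc/dec is still cheapest, between 640 and AVX_GEN4_OPTSIZE (4096) bytes the AVX gen2 routines win, and beyond that the AVX2 gen4 routines do. A minimal user-space sketch of the same length-threshold dispatch, with hypothetical stand-ins (gcm_sse, gcm_avx_gen2, gcm_avx2_gen4) for the asmlinkage routines:

#include <stddef.h>
#include <stdio.h>

#define AVX_GEN2_OPTSIZE  640	/* below this, plain SSE wins */
#define AVX_GEN4_OPTSIZE 4096	/* below this, AVX gen2 wins  */

/* Hypothetical stand-ins for aesni_gcm_enc{,_avx_gen2,_avx_gen4}(). */
static void gcm_sse(size_t len)       { printf("SSE path, %zu bytes\n", len); }
static void gcm_avx_gen2(size_t len)  { printf("AVX gen2 path, %zu bytes\n", len); }
static void gcm_avx2_gen4(size_t len) { printf("AVX2 gen4 path, %zu bytes\n", len); }

static void gcm_encrypt(size_t plaintext_len)
{
	if (plaintext_len < AVX_GEN2_OPTSIZE)
		gcm_sse(plaintext_len);		/* too small to amortize the AVX setup */
	else if (plaintext_len < AVX_GEN4_OPTSIZE)
		gcm_avx_gen2(plaintext_len);
	else
		gcm_avx2_gen4(plaintext_len);
}

int main(void)
{
	gcm_encrypt(64);	/* -> SSE      */
	gcm_encrypt(2048);	/* -> AVX gen2 */
	gcm_encrypt(8192);	/* -> AVX gen4 */
	return 0;
}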
@@ -395,12 +517,6 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
-#ifdef HAS_CTR
-static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
-}
-#endif
#endif
#ifdef HAS_PCBC
@@ -520,6 +636,78 @@ static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
aesni_enc(ctx, out, in);
}
+#ifdef CONFIG_X86_64
+
+static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+}
+
+static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+}
+
+static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+ aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
+}
+
+static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+{
+ aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+}
+
+static const struct common_glue_ctx aesni_enc_xts = {
+ .num_funcs = 2,
+ .fpu_blocks_limit = 1,
+
+ .funcs = { {
+ .num_blocks = 8,
+ .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+ } }
+};
+
+static const struct common_glue_ctx aesni_dec_xts = {
+ .num_funcs = 2,
+ .fpu_blocks_limit = 1,
+
+ .funcs = { {
+ .num_blocks = 8,
+ .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+ } }
+};
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+
+ return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
+ XTS_TWEAK_CAST(aesni_xts_tweak),
+ aes_ctx(ctx->raw_tweak_ctx),
+ aes_ctx(ctx->raw_crypt_ctx));
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+
+ return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
+ XTS_TWEAK_CAST(aesni_xts_tweak),
+ aes_ctx(ctx->raw_tweak_ctx),
+ aes_ctx(ctx->raw_crypt_ctx));
+}
+
+#else
+
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -570,6 +758,8 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
return ret;
}
+#endif
+
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
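The common_glue_ctx tables above list a wide routine first and a single-block routine last; glue_xts_crypt_128bit() works through the table, taking the 8-block aesni_xts_crypt8 path while at least eight blocks remain and the 1-block path for the tail. A rough user-space sketch of that walk, assuming 16-byte blocks and using crypt8()/crypt1() as hypothetical stand-ins for the real routines:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* One table entry: a routine plus how many 16-byte blocks it handles per call. */
typedef void (*xts_fn)(uint8_t *dst, const uint8_t *src);

struct glue_entry {
	size_t num_blocks;
	xts_fn fn;
};

/* Hypothetical stand-ins for aesni_xts_enc8()/aesni_xts_enc(); they only copy
 * data so the sketch stays self-contained. */
static void crypt8(uint8_t *dst, const uint8_t *src) { memcpy(dst, src, 8 * 16); }
static void crypt1(uint8_t *dst, const uint8_t *src) { memcpy(dst, src, 16); }

static const struct glue_entry xts_funcs[] = {
	{ .num_blocks = 8, .fn = crypt8 },	/* wide path, tried first */
	{ .num_blocks = 1, .fn = crypt1 },	/* single-block tail */
};

void xts_walk(uint8_t *dst, const uint8_t *src, size_t nblocks)
{
	size_t i;

	for (i = 0; i < sizeof(xts_funcs) / sizeof(xts_funcs[0]); i++) {
		size_t width = xts_funcs[i].num_blocks;

		while (nblocks >= width) {
			xts_funcs[i].fn(dst, src);
			dst += width * 16;
			src += width * 16;
			nblocks -= width;
		}
	}
}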
@@ -845,7 +1035,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
dst = src;
}
- aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+ aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
@@ -926,12 +1116,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
dst = src;
}
- aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+ aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
/* Compare generated tag with passed in tag. */
- retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+ retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-EBADMSG : 0;
if (one_entry_in_sg) {
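The memcmp() to crypto_memneq() change matters because memcmp() returns at the first differing byte, so its timing can leak how much of the authentication tag matched. A sketch of a constant-time inequality check in the same spirit (the in-kernel crypto_memneq() is more elaborate; this is only illustrative):

#include <stddef.h>

/*
 * Illustrative constant-time tag comparison: every byte is always XORed in,
 * so the running time does not depend on where the first mismatch sits.
 * Returns non-zero if the tags differ, like crypto_memneq().
 */
int tag_neq(const unsigned char *a, const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;
}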
@@ -1158,33 +1348,6 @@ static struct crypto_alg aesni_algs[] = { {
.maxauthsize = 16,
},
},
-#ifdef HAS_CTR
-}, {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_rfc3686_ctr_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- .geniv = "seqiv",
- },
- },
-#endif
#endif
#ifdef HAS_PCBC
}, {
@@ -1310,6 +1473,27 @@ static int __init aesni_init(void)
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_AS_AVX2
+ if (boot_cpu_has(X86_FEATURE_AVX2)) {
+ pr_info("AVX2 version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
+ aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
+ } else
+#endif
+#ifdef CONFIG_AS_AVX
+ if (boot_cpu_has(X86_FEATURE_AVX)) {
+ pr_info("AVX version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
+ aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
+ } else
+#endif
+ {
+ pr_info("SSE version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc;
+ aesni_gcm_dec_tfm = aesni_gcm_dec;
+ }
+#endif
err = crypto_fpu_init();
if (err)
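The init path above binds aesni_gcm_enc_tfm/aesni_gcm_dec_tfm once, based on boot_cpu_has(), so the per-request code never re-tests CPU features. A user-space sketch of the same one-time selection, using the GCC/Clang __builtin_cpu_supports() built-in in place of boot_cpu_has() and hypothetical gcm_enc_* stubs:

#include <stdio.h>

/* Hypothetical stubs standing in for the SSE/AVX/AVX2 gcm_enc entry points. */
static void gcm_enc_sse(void)  { puts("SSE gcm_enc");  }
static void gcm_enc_avx(void)  { puts("AVX gcm_enc");  }
static void gcm_enc_avx2(void) { puts("AVX2 gcm_enc"); }

static void (*gcm_enc_tfm)(void);

int main(void)
{
	__builtin_cpu_init();		/* GCC/Clang x86 CPU-feature probe */

	if (__builtin_cpu_supports("avx2"))
		gcm_enc_tfm = gcm_enc_avx2;
	else if (__builtin_cpu_supports("avx"))
		gcm_enc_tfm = gcm_enc_avx;
	else
		gcm_enc_tfm = gcm_enc_sse;

	gcm_enc_tfm();			/* later calls go straight through the pointer */
	return 0;
}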