Diffstat (limited to 'drivers/crypto/padlock-aes.c')
| -rw-r--r-- | drivers/crypto/padlock-aes.c | 200 |
1 file changed, 153 insertions, 47 deletions
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bf2917d197a..633ba945e15 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,15 +9,32 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
+
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
 
 /* Control word. */
 struct cword {
@@ -49,6 +66,8 @@ struct aes_ctx {
        u32 *D;
 };
 
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
+
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
 static inline int
@@ -89,6 +108,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
        const __le32 *key = (const __le32 *)in_key;
        u32 *flags = &tfm->crt_flags;
        struct crypto_aes_ctx gen_aes;
+       int cpu;
 
        if (key_len % 8) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -118,7 +138,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
        /* Don't generate extended keys if the hardware can do it. */
        if (aes_hw_extkey_available(key_len))
-               return 0;
+               goto ok;
 
        ctx->D = ctx->d_data;
        ctx->cword.encrypt.keygen = 1;
@@ -131,15 +151,34 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
        memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
        memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
+
+ok:
+       for_each_online_cpu(cpu)
+               if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+                   &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+                       per_cpu(paes_last_cword, cpu) = NULL;
+
        return 0;
 }
 
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock.
  */
-static inline void padlock_reset_key(void)
+static inline void padlock_reset_key(struct cword *cword)
+{
+       int cpu = raw_smp_processor_id();
+
+       if (cword != per_cpu(paes_last_cword, cpu))
+#ifndef CONFIG_X86_64
+               asm volatile ("pushfl; popfl");
+#else
+               asm volatile ("pushfq; popfq");
+#endif
+}
+
+static inline void padlock_store_cword(struct cword *cword)
 {
-       asm volatile ("pushfl; popfl");
+       per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
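The hunk above replaces the unconditional key reload (a pushfl/popfl on every call) with a lazy scheme: each CPU remembers in paes_last_cword which control word it loaded last, padlock_reset_key() forces a reload only when the control word changes, and aes_set_key() clears any per-CPU pointer that still refers to a re-keyed context. Below is a minimal userspace sketch of that caching pattern; it is an illustration, not driver code (last_cword, use_key() and the reload counter are made-up names, and a single global stands in for the per-CPU variable):

#include <stdio.h>

struct cword { unsigned int key_id; };

static const struct cword *last_cword;  /* stands in for per_cpu(paes_last_cword) */
static unsigned int reloads;

static void use_key(const struct cword *cword)
{
        if (cword != last_cword)        /* padlock_reset_key(): reload only on change */
                reloads++;              /* the driver reloads flags here instead */
        /* ... the xcrypt operation would run here ... */
        last_cword = cword;             /* padlock_store_cword() */
}

int main(void)
{
        struct cword a = { 1 }, b = { 2 };

        use_key(&a); use_key(&a); use_key(&a);  /* one reload, then cached */
        use_key(&b);                            /* key change forces a reload */
        use_key(&a);
        printf("reloads: %u for 5 operations\n", reloads);      /* prints 3 */
        return 0;
}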
@@ -148,64 +187,111 @@ static inline void padlock_reset_key(void)
  * should be used only inside the irq_ts_save/restore() context
  */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-                                 void *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+                                 struct cword *control_word, int count)
 {
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"      /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
-                     : "d"(control_word), "b"(key), "c"(1));
+                     : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+                                u8 *iv, struct cword *control_word, int count)
+{
+       asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"      /* rep xcryptcbc */
+                     : "+S" (input), "+D" (output), "+a" (iv)
+                     : "d" (control_word), "b" (key), "c" (count));
+       return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+                          struct cword *cword, int count)
+{
+       /*
+        * Padlock prefetches extra data so we must provide mapped input buffers.
+        * Assume there are at least 16 bytes of stack already in use.
+        */
+       u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+       u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+       memcpy(tmp, in, count * AES_BLOCK_SIZE);
+       rep_xcrypt_ecb(tmp, out, key, cword, count);
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+                         u8 *iv, struct cword *cword, int count)
 {
-       u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+       /*
+        * Padlock prefetches extra data so we must provide mapped input buffers.
+        * Assume there are at least 16 bytes of stack already in use.
+        */
+       u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-       memcpy(tmp, in, AES_BLOCK_SIZE);
-       padlock_xcrypt(tmp, out, key, cword);
+       memcpy(tmp, in, count * AES_BLOCK_SIZE);
+       return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-                            struct cword *cword)
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+                            struct cword *cword, int count)
 {
-       /* padlock_xcrypt requires at least two blocks of data. */
-       if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-                      (PAGE_SIZE - 1)))) {
-               aes_crypt_copy(in, out, key, cword);
+       /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+        * We could avoid some copying here but it's probably not worth it.
+        */
+       if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
+               ecb_crypt_copy(in, out, key, cword, count);
                return;
        }
 
-       padlock_xcrypt(in, out, key, cword);
+       rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+                           u8 *iv, struct cword *cword, int count)
+{
+       /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data.
+        */
+       if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
+               return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+       return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                      void *control_word, u32 count)
 {
-       if (count == 1) {
-               aes_crypt(input, output, key, control_word);
+       u32 initial = count & (ecb_fetch_blocks - 1);
+
+       if (count < ecb_fetch_blocks) {
+               ecb_crypt(input, output, key, control_word, count);
                return;
        }
 
-       asm volatile ("test $1, %%cl;"
-                     "je 1f;"
-                     "lea -1(%%ecx), %%eax;"
-                     "mov $1, %%ecx;"
-                     ".byte 0xf3,0x0f,0xa7,0xc8;"      /* rep xcryptecb */
-                     "mov %%eax, %%ecx;"
-                     "1:"
-                     ".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
+       if (initial)
+               asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
+                             : "+S"(input), "+D"(output)
+                             : "d"(control_word), "b"(key), "c"(initial));
+
+       asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
-                     : "d"(control_word), "b"(key), "c"(count)
-                     : "ax");
+                     : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                     u8 *iv, void *control_word, u32 count)
 {
-       /* rep xcryptcbc */
-       asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+       u32 initial = count & (cbc_fetch_blocks - 1);
+
+       if (count < cbc_fetch_blocks)
+               return cbc_crypt(input, output, key, iv, control_word, count);
+
+       if (initial)
+               asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
+                             : "+S" (input), "+D" (output), "+a" (iv)
+                             : "d" (control_word), "b" (key), "c" (initial));
+
+       asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
-                     : "d" (control_word), "b" (key), "c" (count));
+                     : "d" (control_word), "b" (key), "c" (count-initial));
        return iv;
 }
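ecb_crypt() and cbc_crypt() in the hunk above guard against the engine's fixed-size prefetch reading past the end of a mapped page: if the input's offset within its page plus the fetch size crosses PAGE_SIZE, the data is first copied into an aligned bounce buffer on the stack. A small standalone sketch of that arithmetic, assuming 4 KiB x86 pages (needs_copy() is an illustrative name, not a driver function):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define AES_BLOCK_SIZE  16UL

/* True when a fixed-size hardware fetch starting at 'in' would run past
 * the end of in's page, i.e. might touch an unmapped page. */
static int needs_copy(unsigned long in, unsigned long fetch_bytes)
{
        return (in & ~PAGE_MASK) + fetch_bytes > PAGE_SIZE;
}

int main(void)
{
        unsigned long ecb_fetch_bytes = 8 * AES_BLOCK_SIZE;     /* errata setting */

        /* One block at page offset 4080: the engine may fetch 128 bytes,
         * running 112 bytes past the page end, so bounce-buffer it. */
        printf("offset 4080: %s\n",
               needs_copy(0x1000 + 4080, ecb_fetch_bytes) ? "copy" : "in place");
        /* Offset 3968: 3968 + 128 == 4096, the fetch stays in the page. */
        printf("offset 3968: %s\n",
               needs_copy(0x1000 + 3968, ecb_fetch_bytes) ? "copy" : "in place");
        return 0;
}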
@@ -213,22 +299,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
-       aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+       ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
        irq_ts_restore(ts_state);
+       padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
-       aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+       ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
        irq_ts_restore(ts_state);
+       padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static struct crypto_alg aes_alg = {
@@ -240,7 +328,6 @@ static struct crypto_alg aes_alg = {
        .cra_ctxsize            =       sizeof(struct aes_ctx),
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_module             =       THIS_MODULE,
-       .cra_list               =       LIST_HEAD_INIT(aes_alg.cra_list),
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
@@ -261,7 +348,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
        int err;
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.encrypt);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
@@ -276,6 +363,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
        }
        irq_ts_restore(ts_state);
 
+       padlock_store_cword(&ctx->cword.encrypt);
+
        return err;
 }
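padlock_xcrypt_ecb() and padlock_xcrypt_cbc() in the earlier hunk split each request into a remainder of initial = count & (fetch_blocks - 1) blocks followed by a bulk call whose block count is an exact multiple of the fetch size; the bitmask works because the fetch-block counts are powers of two. A standalone sketch checking that invariant (the loop bounds and the fetch size are illustrative values):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int ecb_fetch_blocks = 8;      /* MAX_ECB_FETCH_BLOCKS */
        unsigned int count;

        /* counts below ecb_fetch_blocks take the ecb_crypt() path instead */
        for (count = 8; count <= 27; count++) {
                unsigned int initial = count & (ecb_fetch_blocks - 1);

                /* first xcrypt handles 'initial' blocks, second the rest */
                assert(initial + (count - initial) == count);
                assert((count - initial) % ecb_fetch_blocks == 0);
                printf("count=%2u -> initial=%u bulk=%u\n",
                       count, initial, count - initial);
        }
        return 0;
}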
@@ -288,7 +377,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
        int err;
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.decrypt);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
@@ -302,6 +391,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);
+
+       padlock_store_cword(&ctx->cword.encrypt);
+
        return err;
 }
 
@@ -315,7 +407,6 @@ static struct crypto_alg ecb_aes_alg = {
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
-       .cra_list               =       LIST_HEAD_INIT(ecb_aes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
@@ -336,7 +427,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
        int err;
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.encrypt);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
@@ -353,6 +444,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
        }
        irq_ts_restore(ts_state);
 
+       padlock_store_cword(&ctx->cword.decrypt);
+
        return err;
 }
 
@@ -365,7 +458,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
        int err;
        int ts_state;
 
-       padlock_reset_key();
+       padlock_reset_key(&ctx->cword.encrypt);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
@@ -380,6 +473,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
        }
        irq_ts_restore(ts_state);
+
+       padlock_store_cword(&ctx->cword.encrypt);
+
        return err;
 }
 
@@ -393,7 +489,6 @@ static struct crypto_alg cbc_aes_alg = {
        .cra_alignmask          =       PADLOCK_ALIGNMENT - 1,
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
-       .cra_list               =       LIST_HEAD_INIT(cbc_aes_alg.cra_list),
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
@@ -406,14 +501,19 @@ static struct crypto_alg cbc_aes_alg = {
        }
 };
 
+static struct x86_cpu_id padlock_cpu_id[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
+
 static int __init padlock_init(void)
 {
        int ret;
+       struct cpuinfo_x86 *c = &cpu_data(0);
 
-       if (!cpu_has_xcrypt) {
-               printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
+       if (!x86_match_cpu(padlock_cpu_id))
                return -ENODEV;
-       }
 
        if (!cpu_has_xcrypt_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
@@ -431,6 +531,12 @@ static int __init padlock_init(void)
 
        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+       if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+               ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+               cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+               printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+       }
+
 out:
        return ret;
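padlock_init() above keys the Nano errata workaround off family 6, model 15, stepping 2, values the kernel reads from CPUID leaf 1 into c->x86, c->x86_model and c->x86_mask. A userspace sketch of the same check using the compiler's cpuid.h (extended family/model handling is omitted here, so this is a simplification of what the kernel's CPU setup code does):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        /* CPUID leaf 1, EAX: bits 3:0 stepping, 7:4 model, 11:8 family */
        unsigned int stepping = eax & 0xf;
        unsigned int model    = (eax >> 4) & 0xf;
        unsigned int family   = (eax >> 8) & 0xf;

        printf("family %u, model %u, stepping %u\n", family, model, stepping);
        if (family == 6 && model == 15 && stepping == 2)
                printf("VIA Nano stepping 2: prefetch workaround needed\n");
        return 0;
}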
