Diffstat (limited to 'crypto')
52 files changed, 2657 insertions, 1153 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 26b5dd0cb56..1cd497d7a15 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -28,7 +28,7 @@ config CRYPTO_FIPS This options enables the fips boot option which is required if you want to system to operate in a FIPS 200 certification. You should say no unless you know what - this is. Note that CRYPTO_ANSI_CPRNG is requred if this + this is. Note that CRYPTO_ANSI_CPRNG is required if this option is selected config CRYPTO_ALGAPI @@ -80,6 +80,11 @@ config CRYPTO_RNG2 config CRYPTO_PCOMP tristate + select CRYPTO_PCOMP2 + select CRYPTO_ALGAPI + +config CRYPTO_PCOMP2 + tristate select CRYPTO_ALGAPI2 config CRYPTO_MANAGER @@ -94,7 +99,15 @@ config CRYPTO_MANAGER2 select CRYPTO_AEAD2 select CRYPTO_HASH2 select CRYPTO_BLKCIPHER2 - select CRYPTO_PCOMP + select CRYPTO_PCOMP2 + +config CRYPTO_MANAGER_TESTS + bool "Run algolithms' self-tests" + default y + depends on CRYPTO_MANAGER2 + help + Run cryptomanager's tests for the new crypto algorithms being + registered. config CRYPTO_GF128MUL tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" @@ -114,6 +127,16 @@ config CRYPTO_NULL help These are 'Null' algorithms, used by IPsec, which do nothing. +config CRYPTO_PCRYPT + tristate "Parallel crypto engine (EXPERIMENTAL)" + depends on SMP && EXPERIMENTAL + select PADATA + select CRYPTO_MANAGER + select CRYPTO_AEAD + help + This converts an arbitrary crypto algorithm into a parallel + algorithm that executes in kernel threads. + config CRYPTO_WORKQUEUE tristate @@ -440,6 +463,15 @@ config CRYPTO_WP512 See also: <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html> +config CRYPTO_GHASH_CLMUL_NI_INTEL + tristate "GHASH digest algorithm (CLMUL-NI accelerated)" + depends on (X86 || UML_X86) && 64BIT + select CRYPTO_SHASH + select CRYPTO_CRYPTD + help + GHASH is message digest algorithm for GCM (Galois/Counter Mode). + The implementation is accelerated by CLMUL-NI of Intel. + comment "Ciphers" config CRYPTO_AES @@ -807,8 +839,8 @@ config CRYPTO_ANSI_CPRNG help This option enables the generic pseudo random number generator for cryptographic modules. Uses the Algorithm specified in - ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS - is selected + ANSI X9.31 A.2.4. Note that this option must be enabled if + CRYPTO_FIPS is selected source "drivers/crypto/Kconfig" diff --git a/crypto/Makefile b/crypto/Makefile index 9e8f61908cb..423b7de61f9 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o crypto_hash-objs += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o -obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o +obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o cryptomgr-objs := algboss.o testmgr.o @@ -56,11 +56,12 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o +obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o -obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o +obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o obj-$(CONFIG_CRYPTO_AES) += aes_generic.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index f6f08336df5..a854df2a5a4 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -1,6 +1,6 @@ /* * Asynchronous block chaining cipher operations. 
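As a usage aside for the new CRYPTO_PCRYPT entry above: pcrypt is a template, so a caller parallelizes an existing AEAD simply by wrapping its name. A minimal sketch, assuming the standard in-kernel AEAD allocation API and an authenc() inner algorithm (both illustrative, not part of this patch):

#include <linux/crypto.h>

static struct crypto_aead *example_alloc_pcrypt_aead(void)
{
	/* the pcrypt template wraps the inner AEAD and dispatches its
	 * requests to kernel threads via padata */
	return crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))",
				 0, 0);
}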
- * + * * This is the asynchronous version of blkcipher.c indicating completion * via a callback. * @@ -8,7 +8,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -24,10 +24,287 @@ #include <linux/slab.h> #include <linux/seq_file.h> +#include <crypto/scatterwalk.h> + #include "internal.h" static const char *skcipher_default_geniv __read_mostly; +struct ablkcipher_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + void *data; +}; + +enum { + ABLKCIPHER_WALK_SLOW = 1 << 0, +}; + +static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p) +{ + scatterwalk_copychunks(p->data, &p->dst, p->len, 1); +} + +void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) +{ + struct ablkcipher_buffer *p, *tmp; + + list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { + ablkcipher_buffer_write(p); + list_del(&p->entry); + kfree(p); + } +} +EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete); + +static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, + struct ablkcipher_buffer *p) +{ + p->dst = walk->out; + list_add_tail(&p->entry, &walk->buffers); +} + +/* Get a spot of the specified length that does not straddle a page. + * The caller needs to ensure that there is enough space for this operation. + */ +static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) +{ + u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); + return max(start, end_page); +} + +static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, + unsigned int bsize) +{ + unsigned int n = bsize; + + for (;;) { + unsigned int len_this_page = scatterwalk_pagelen(&walk->out); + + if (len_this_page > n) + len_this_page = n; + scatterwalk_advance(&walk->out, n); + if (n == len_this_page) + break; + n -= len_this_page; + scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg)); + } + + return bsize; +} + +static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, + unsigned int n) +{ + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); + + return n; +} + +static int ablkcipher_walk_next(struct ablkcipher_request *req, + struct ablkcipher_walk *walk); + +int ablkcipher_walk_done(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, int err) +{ + struct crypto_tfm *tfm = req->base.tfm; + unsigned int nbytes = 0; + + if (likely(err >= 0)) { + unsigned int n = walk->nbytes - err; + + if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) + n = ablkcipher_done_fast(walk, n); + else if (WARN_ON(err)) { + err = -EINVAL; + goto err; + } else + n = ablkcipher_done_slow(walk, n); + + nbytes = walk->total - n; + err = 0; + } + + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); + +err: + walk->total = nbytes; + walk->nbytes = nbytes; + + if (nbytes) { + crypto_yield(req->base.flags); + return ablkcipher_walk_next(req, walk); + } + + if (walk->iv != req->info) + memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); + if (walk->iv_buffer) + kfree(walk->iv_buffer); + + return err; +} +EXPORT_SYMBOL_GPL(ablkcipher_walk_done); + +static inline int ablkcipher_next_slow(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, + unsigned int bsize, + unsigned int alignmask, + 
void **src_p, void **dst_p) +{ + unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); + struct ablkcipher_buffer *p; + void *src, *dst, *base; + unsigned int n; + + n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1); + n += (aligned_bsize * 3 - (alignmask + 1) + + (alignmask & ~(crypto_tfm_ctx_alignment() - 1))); + + p = kmalloc(n, GFP_ATOMIC); + if (!p) + return ablkcipher_walk_done(req, walk, -ENOMEM); + + base = p + 1; + + dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1); + src = dst = ablkcipher_get_spot(dst, bsize); + + p->len = bsize; + p->data = dst; + + scatterwalk_copychunks(src, &walk->in, bsize, 0); + + ablkcipher_queue_write(walk, p); + + walk->nbytes = bsize; + walk->flags |= ABLKCIPHER_WALK_SLOW; + + *src_p = src; + *dst_p = dst; + + return 0; +} + +static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk, + struct crypto_tfm *tfm, + unsigned int alignmask) +{ + unsigned bs = walk->blocksize; + unsigned int ivsize = tfm->crt_ablkcipher.ivsize; + unsigned aligned_bs = ALIGN(bs, alignmask + 1); + unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - + (alignmask + 1); + u8 *iv; + + size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); + walk->iv_buffer = kmalloc(size, GFP_ATOMIC); + if (!walk->iv_buffer) + return -ENOMEM; + + iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1); + iv = ablkcipher_get_spot(iv, bs) + aligned_bs; + iv = ablkcipher_get_spot(iv, bs) + aligned_bs; + iv = ablkcipher_get_spot(iv, ivsize); + + walk->iv = memcpy(iv, walk->iv, ivsize); + return 0; +} + +static inline int ablkcipher_next_fast(struct ablkcipher_request *req, + struct ablkcipher_walk *walk) +{ + walk->src.page = scatterwalk_page(&walk->in); + walk->src.offset = offset_in_page(walk->in.offset); + walk->dst.page = scatterwalk_page(&walk->out); + walk->dst.offset = offset_in_page(walk->out.offset); + + return 0; +} + +static int ablkcipher_walk_next(struct ablkcipher_request *req, + struct ablkcipher_walk *walk) +{ + struct crypto_tfm *tfm = req->base.tfm; + unsigned int alignmask, bsize, n; + void *src, *dst; + int err; + + alignmask = crypto_tfm_alg_alignmask(tfm); + n = walk->total; + if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) { + req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; + return ablkcipher_walk_done(req, walk, -EINVAL); + } + + walk->flags &= ~ABLKCIPHER_WALK_SLOW; + src = dst = NULL; + + bsize = min(walk->blocksize, n); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (n < bsize || + !scatterwalk_aligned(&walk->in, alignmask) || + !scatterwalk_aligned(&walk->out, alignmask)) { + err = ablkcipher_next_slow(req, walk, bsize, alignmask, + &src, &dst); + goto set_phys_lowmem; + } + + walk->nbytes = n; + + return ablkcipher_next_fast(req, walk); + +set_phys_lowmem: + if (err >= 0) { + walk->src.page = virt_to_page(src); + walk->dst.page = virt_to_page(dst); + walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1)); + walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1)); + } + + return err; +} + +static int ablkcipher_walk_first(struct ablkcipher_request *req, + struct ablkcipher_walk *walk) +{ + struct crypto_tfm *tfm = req->base.tfm; + unsigned int alignmask; + + alignmask = crypto_tfm_alg_alignmask(tfm); + if (WARN_ON_ONCE(in_irq())) + return -EDEADLK; + + walk->nbytes = walk->total; + if (unlikely(!walk->total)) + return 0; + + walk->iv_buffer = NULL; + walk->iv = req->info; + if (unlikely(((unsigned long)walk->iv & alignmask))) { + int err = ablkcipher_copy_iv(walk, tfm, 
alignmask); + if (err) + return err; + } + + scatterwalk_start(&walk->in, walk->in.sg); + scatterwalk_start(&walk->out, walk->out.sg); + + return ablkcipher_walk_next(req, walk); +} + +int ablkcipher_walk_phys(struct ablkcipher_request *req, + struct ablkcipher_walk *walk) +{ + walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm); + return ablkcipher_walk_first(req, walk); +} +EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); + static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/aead.c b/crypto/aead.c index d9aa733db16..6729e8ff68e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -1,13 +1,13 @@ /* * AEAD: Authenticated Encryption with Associated Data - * + * * This file provides API support for AEAD algorithms. * * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/rtnetlink.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index e78b7ee44a7..a68c73dae15 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1,4 +1,4 @@ -/* +/* * Cryptographic API. * * AES Cipher Algorithm. @@ -1127,7 +1127,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); #define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) -#define imix_col(y,x) do { \ +#define imix_col(y, x) do { \ u = star_x(x); \ v = star_x(u); \ w = star_x(v); \ diff --git a/crypto/ahash.c b/crypto/ahash.c index 33a4ff45f84..b8c59b889c6 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -78,7 +78,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) walk->data -= walk->offset; if (nbytes && walk->offset & alignmask && !err) { - walk->offset += alignmask - 1; walk->offset = ALIGN(walk->offset, alignmask + 1); walk->data += walk->offset; diff --git a/crypto/algapi.c b/crypto/algapi.c index f149b1c8b76..c3cf1a69a47 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/rtnetlink.h> +#include <linux/slab.h> #include <linux/string.h> #include "internal.h" @@ -230,7 +231,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) list_add(&alg->cra_list, &crypto_alg_list); list_add(&larval->alg.cra_list, &crypto_alg_list); -out: +out: return larval; free_larval: @@ -388,7 +389,7 @@ int crypto_unregister_alg(struct crypto_alg *alg) { int ret; LIST_HEAD(list); - + down_write(&crypto_alg_sem); ret = crypto_remove_alg(alg, &list); up_write(&crypto_alg_sem); @@ -543,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, { int err = -EINVAL; - if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) + if ((alg->cra_flags ^ frontend->type) & frontend->maskset) goto out; spawn->frontend = frontend; diff --git a/crypto/algboss.c b/crypto/algboss.c index 412241ce4cf..40bd391f34d 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -19,6 +19,7 @@ #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/string.h> #include "internal.h" @@ -205,6 
+206,7 @@ err: return NOTIFY_OK; } +#ifdef CONFIG_CRYPTO_MANAGER_TESTS static int cryptomgr_test(void *data) { struct crypto_test_param *param = data; @@ -265,6 +267,7 @@ err_put_module: err: return NOTIFY_OK; } +#endif /* CONFIG_CRYPTO_MANAGER_TESTS */ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, void *data) @@ -272,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, switch (msg) { case CRYPTO_MSG_ALG_REQUEST: return cryptomgr_schedule_probe(data); +#ifdef CONFIG_CRYPTO_MANAGER_TESTS case CRYPTO_MSG_ALG_REGISTER: return cryptomgr_schedule_test(data); +#endif } return NOTIFY_DONE; diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 3aa6e3834bf..2bc33214284 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2, * Returns DEFAULT_BLK_SZ bytes of random data per call * returns 0 if generation succeded, <0 if something went wrong */ -static int _get_more_prng_bytes(struct prng_context *ctx) +static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) { int i; unsigned char tmp[DEFAULT_BLK_SZ]; @@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx) */ if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) { - if (fips_enabled) { + if (cont_test) { panic("cprng %p Failed repetition check!\n", ctx); } @@ -185,16 +185,14 @@ static int _get_more_prng_bytes(struct prng_context *ctx) } /* Our exported functions */ -static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) +static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, + int do_cont_test) { unsigned char *ptr = buf; unsigned int byte_count = (unsigned int)nbytes; int err; - if (nbytes < 0) - return -EINVAL; - spin_lock_bh(&ctx->prng_lock); err = -EINVAL; @@ -220,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) remainder: if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx) < 0) { + if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; @@ -247,7 +245,7 @@ empty_rbuf: */ for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx) < 0) { + if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; @@ -356,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, { struct prng_context *prng = crypto_rng_ctx(tfm); - return get_prng_bytes(rdata, dlen, prng); + return get_prng_bytes(rdata, dlen, prng, 0); } /* @@ -404,19 +402,79 @@ static struct crypto_alg rng_alg = { } }; +#ifdef CONFIG_CRYPTO_FIPS +static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen) +{ + struct prng_context *prng = crypto_rng_ctx(tfm); + + return get_prng_bytes(rdata, dlen, prng, 1); +} + +static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +{ + u8 rdata[DEFAULT_BLK_SZ]; + int rc; + + struct prng_context *prng = crypto_rng_ctx(tfm); + + rc = cprng_reset(tfm, seed, slen); + + if (!rc) + goto out; + + /* this primes our continuity test */ + rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0); + prng->rand_data_valid = DEFAULT_BLK_SZ; + +out: + return rc; +} + +static struct crypto_alg fips_rng_alg = { + .cra_name = "fips(ansi_cprng)", + .cra_driver_name = "fips_ansi_cprng", + .cra_priority = 300, + .cra_flags = 
CRYPTO_ALG_TYPE_RNG, + .cra_ctxsize = sizeof(struct prng_context), + .cra_type = &crypto_rng_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(rng_alg.cra_list), + .cra_init = cprng_init, + .cra_exit = cprng_exit, + .cra_u = { + .rng = { + .rng_make_random = fips_cprng_get_random, + .rng_reset = fips_cprng_reset, + .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, + } + } +}; +#endif /* Module initalization */ static int __init prng_mod_init(void) { - if (fips_enabled) - rng_alg.cra_priority += 200; + int rc = 0; - return crypto_register_alg(&rng_alg); + rc = crypto_register_alg(&rng_alg); +#ifdef CONFIG_CRYPTO_FIPS + if (rc) + goto out; + + rc = crypto_register_alg(&fips_rng_alg); + +out: +#endif + return rc; } static void __exit prng_mod_fini(void) { crypto_unregister_alg(&rng_alg); +#ifdef CONFIG_CRYPTO_FIPS + crypto_unregister_alg(&fips_rng_alg); +#endif return; } diff --git a/crypto/anubis.c b/crypto/anubis.c index e42c3a8ba4a..77530d571c9 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -469,14 +469,13 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; - switch (key_len) - { + switch (key_len) { case 16: case 20: case 24: case 28: case 32: case 36: case 40: break; default: *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return - EINVAL; + return -EINVAL; } ctx->key_len = key_len * 8; @@ -530,23 +529,24 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, /* * compute kappa^{r+1} from kappa^r: */ - if (r == R) { + if (r == R) break; - } for (i = 0; i < N; i++) { int j = i; inter[i] = T0[(kappa[j--] >> 24) ]; - if (j < 0) j = N - 1; + if (j < 0) + j = N - 1; inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; - if (j < 0) j = N - 1; + if (j < 0) + j = N - 1; inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; - if (j < 0) j = N - 1; + if (j < 0) + j = N - 1; inter[i] ^= T3[(kappa[j ] ) & 0xff]; } kappa[0] = inter[0] ^ rc[r]; - for (i = 1; i < N; i++) { + for (i = 1; i < N; i++) kappa[i] = inter[i]; - } } /* @@ -690,7 +690,7 @@ static struct crypto_alg anubis_alg = { static int __init anubis_mod_init(void) { int ret = 0; - + ret = crypto_register_alg(&anubis_alg); return ret; } diff --git a/crypto/api.c b/crypto/api.c index 798526d9053..033a7147e5e 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -10,7 +10,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -288,11 +288,11 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_ops(tfm); - + default: break; } - + BUG(); return -EINVAL; } @@ -315,10 +315,9 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) case CRYPTO_ALG_TYPE_COMPRESS: crypto_exit_compress_ops(tfm); break; - + default: BUG(); - } } @@ -593,12 +592,12 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) { int ret = 0; struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); - + if (!IS_ERR(alg)) { crypto_mod_put(alg); ret = 1; } - + return ret; } EXPORT_SYMBOL_GPL(crypto_has_alg); diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index e24aa80087a..5de2ed13b35 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig @@ -37,3 +37,8 @@ config ASYNC_RAID6_TEST If unsure, say N. 
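To see the effect of the fips(ansi_cprng) registration above, here is a minimal, hypothetical kernel-side sketch using the standard RNG API; the all-zero seed is a placeholder and error handling is trimmed:

#include <crypto/rng.h>

static int example_fips_cprng(void)
{
	struct crypto_rng *rng;
	u8 seed[48] = { 0 };	/* DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ */
	u8 out[16];
	int err;

	rng = crypto_alloc_rng("fips(ansi_cprng)", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* fips_cprng_reset() also primes the continuity test */
	err = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!err) {
		/* returns the number of bytes generated, <0 on error */
		err = crypto_rng_get_bytes(rng, out, sizeof(out));
		if (err > 0)
			err = 0;
	}

	crypto_free_rng(rng);
	return err;
}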
+config ASYNC_TX_DISABLE_PQ_VAL_DMA + bool + +config ASYNC_TX_DISABLE_XOR_VAL_DMA + bool diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index b88db6d1dc6..fdd8257d35d 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -24,16 +24,13 @@ #include <linux/dma-mapping.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> +#include <linux/gfp.h> /** - * scribble - space to hold throwaway P buffer for synchronous gen_syndrome + * pq_scribble_page - space to hold throwaway P or Q buffer for + * synchronous gen_syndrome */ -static struct page *scribble; - -static bool is_raid6_zero_block(struct page *p) -{ - return p == (void *) raid6_empty_zero_page; -} +static struct page *pq_scribble_page; /* the struct page *blocks[] parameter passed to async_gen_syndrome() * and async_syndrome_val() contains the 'P' destination address at @@ -83,7 +80,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, * sources and update the coefficients accordingly */ for (i = 0, idx = 0; i < src_cnt; i++) { - if (is_raid6_zero_block(blocks[i])) + if (blocks[i] == NULL) continue; dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, DMA_TO_DEVICE); @@ -160,9 +157,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, srcs = (void **) blocks; for (i = 0; i < disks; i++) { - if (is_raid6_zero_block(blocks[i])) { + if (blocks[i] == NULL) { BUG_ON(i > disks - 3); /* P or Q can't be zero */ - srcs[i] = blocks[i]; + srcs[i] = (void*)raid6_empty_zero_page; } else srcs[i] = page_address(blocks[i]) + offset; } @@ -186,10 +183,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= * PAGE_SIZE as a temporary buffer of this size is used in the * synchronous path. 'disks' always accounts for both destination - * buffers. + * buffers. If any source buffers (blocks[i] where i < disks - 2) are + * set to NULL those buffers will be replaced with the raid6_zero_page + * in the synchronous path and omitted in the hardware-asynchronous + * path. * * 'blocks' note: if submit->scribble is NULL then the contents of - * 'blocks' may be overridden + * 'blocks' may be overwritten to perform address conversions + * (dma_map_page() or page_address()). 
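As a concrete illustration of the NULL-source convention just described, a hypothetical caller-side sketch (d0, d1, d3, p_page, q_page and the scribble region are assumed to be set up by the caller; none of this is part of the patch):

static struct dma_async_tx_descriptor *
example_partial_syndrome(struct page *d0, struct page *d1,
			 struct page *d3, struct page *p_page,
			 struct page *q_page, addr_conv_t *scribble)
{
	struct page *blocks[6];
	struct async_submit_ctl submit;

	blocks[0] = d0;
	blocks[1] = d1;
	blocks[2] = NULL;	/* all-zero disk: replaced by the zero page
				 * in the sync path, skipped in the dma path */
	blocks[3] = d3;
	blocks[4] = p_page;	/* P @ disks-2 */
	blocks[5] = q_page;	/* Q @ disks-1 */

	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}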
*/ struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, @@ -227,11 +228,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, async_tx_quiesce(&submit->depend_tx); if (!P(blocks, disks)) { - P(blocks, disks) = scribble; + P(blocks, disks) = pq_scribble_page; BUG_ON(len + offset > PAGE_SIZE); } if (!Q(blocks, disks)) { - Q(blocks, disks) = scribble; + Q(blocks, disks) = pq_scribble_page; BUG_ON(len + offset > PAGE_SIZE); } do_sync_gen_syndrome(blocks, offset, disks, len, submit); @@ -240,6 +241,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, } EXPORT_SYMBOL_GPL(async_gen_syndrome); +static inline struct dma_chan * +pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) +{ + #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA + return NULL; + #endif + return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks, + disks, len); +} + /** * async_syndrome_val - asynchronously validate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 @@ -260,13 +271,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, - NULL, 0, blocks, disks, - len); + struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx; + unsigned char coefs[disks-2]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; dma_addr_t *dma_src = NULL; + int src_cnt = 0; BUG_ON(disks < 4); @@ -285,22 +296,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, __func__, disks, len); if (!P(blocks, disks)) dma_flags |= DMA_PREP_PQ_DISABLE_P; + else + pq[0] = dma_map_page(dev, P(blocks, disks), + offset, len, + DMA_TO_DEVICE); if (!Q(blocks, disks)) dma_flags |= DMA_PREP_PQ_DISABLE_Q; + else + pq[1] = dma_map_page(dev, Q(blocks, disks), + offset, len, + DMA_TO_DEVICE); + if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - for (i = 0; i < disks; i++) + for (i = 0; i < disks-2; i++) if (likely(blocks[i])) { - BUG_ON(is_raid6_zero_block(blocks[i])); - dma_src[i] = dma_map_page(dev, blocks[i], - offset, len, - DMA_TO_DEVICE); + dma_src[src_cnt] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + coefs[src_cnt] = raid6_gfexp[i]; + src_cnt++; } for (;;) { tx = device->device_prep_dma_pq_val(chan, pq, dma_src, - disks - 2, - raid6_gfexp, + src_cnt, + coefs, len, pqres, dma_flags); if (likely(tx)) @@ -373,9 +394,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val); static int __init async_pq_init(void) { - scribble = alloc_page(GFP_KERNEL); + pq_scribble_page = alloc_page(GFP_KERNEL); - if (scribble) + if (pq_scribble_page) return 0; pr_err("%s: failed to allocate required spare page\n", __func__); @@ -385,7 +406,7 @@ static int __init async_pq_init(void) static void __exit async_pq_exit(void) { - put_page(scribble); + put_page(pq_scribble_page); } module_init(async_pq_init); diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 6d73dde4786..ce038d861eb 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, } static struct dma_async_tx_descriptor * -__2data_recov_4(size_t 
bytes, int faila, int failb, struct page **blocks, - struct async_submit_ctl *submit) +__2data_recov_4(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *a, *b; @@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, void *cb_param = submit->cb_param; void *scribble = submit->scribble; - p = blocks[4-2]; - q = blocks[4-1]; + p = blocks[disks-2]; + q = blocks[disks-1]; a = blocks[faila]; b = blocks[failb]; @@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, } static struct dma_async_tx_descriptor * -__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, - struct async_submit_ctl *submit) +__2data_recov_5(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *g, *dp, *dq; @@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; - int uninitialized_var(good); - int i; + int good_srcs, good, i; - for (i = 0; i < 3; i++) { + good_srcs = 0; + good = -1; + for (i = 0; i < disks-2; i++) { + if (blocks[i] == NULL) + continue; if (i == faila || i == failb) continue; - else { - good = i; - break; - } + good = i; + good_srcs++; } - BUG_ON(i >= 3); + BUG_ON(good_srcs > 1); - p = blocks[5-2]; - q = blocks[5-1]; + p = blocks[disks-2]; + q = blocks[disks-1]; g = blocks[good]; /* Compute syndrome with zero for the missing data pages @@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, * delta p and delta q */ dp = blocks[faila]; - blocks[faila] = (void *)raid6_empty_zero_page; + blocks[faila] = NULL; blocks[disks-2] = dp; dq = blocks[failb]; - blocks[failb] = (void *)raid6_empty_zero_page; + blocks[failb] = NULL; blocks[disks-1] = dq; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); @@ -323,22 +324,29 @@ struct dma_async_tx_descriptor * async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { + void *scribble = submit->scribble; + int non_zero_srcs, i; + BUG_ON(faila == failb); if (failb < faila) swap(faila, failb); pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); - /* we need to preserve the contents of 'blocks' for the async - * case, so punt to synchronous if a scribble buffer is not available + /* if a dma resource is not available or a scribble buffer is not + * available punt to the synchronous path. In the 'dma not + * available' case be sure to use the scribble buffer to + * preserve the content of 'blocks' as the caller intended. */ - if (!submit->scribble) { - void **ptrs = (void **) blocks; - int i; + if (!async_dma_find_channel(DMA_PQ) || !scribble) { + void **ptrs = scribble ? 
scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) - ptrs[i] = page_address(blocks[i]); + if (blocks[i] == NULL) + ptrs[i] = (void *) raid6_empty_zero_page; + else + ptrs[i] = page_address(blocks[i]); raid6_2data_recov(disks, bytes, faila, failb, ptrs); @@ -347,19 +355,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, return NULL; } - switch (disks) { - case 4: + non_zero_srcs = 0; + for (i = 0; i < disks-2 && non_zero_srcs < 4; i++) + if (blocks[i]) + non_zero_srcs++; + switch (non_zero_srcs) { + case 0: + case 1: + /* There must be at least 2 sources - the failed devices. */ + BUG(); + + case 2: /* dma devices do not uniformly understand a zero source pq * operation (in contrast to the synchronous case), so - * explicitly handle the 4 disk special case + * explicitly handle the special case of a 4 disk array with + * both data disks missing. */ - return __2data_recov_4(bytes, faila, failb, blocks, submit); - case 5: + return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); + case 3: /* dma devices do not uniformly understand a single * source pq operation (in contrast to the synchronous - * case), so explicitly handle the 5 disk special case + * case), so explicitly handle the special case of a 5 disk + * array with 2 of 3 data disks missing. */ - return __2data_recov_5(bytes, faila, failb, blocks, submit); + return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); default: return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); } @@ -385,20 +404,25 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; + int good_srcs, good, i; struct page *srcs[2]; pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); - /* we need to preserve the contents of 'blocks' for the async - * case, so punt to synchronous if a scribble buffer is not available + /* if a dma resource is not available or a scribble buffer is not + * available punt to the synchronous path. In the 'dma not + * available' case be sure to use the scribble buffer to + * preserve the content of 'blocks' as the caller intended. */ - if (!scribble) { - void **ptrs = (void **) blocks; - int i; + if (!async_dma_find_channel(DMA_PQ) || !scribble) { + void **ptrs = scribble ? scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) - ptrs[i] = page_address(blocks[i]); + if (blocks[i] == NULL) + ptrs[i] = (void*)raid6_empty_zero_page; + else + ptrs[i] = page_address(blocks[i]); raid6_datap_recov(disks, bytes, faila, ptrs); @@ -407,6 +431,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, return NULL; } + good_srcs = 0; + good = -1; + for (i = 0; i < disks-2; i++) { + if (i == faila) + continue; + if (blocks[i]) { + good = i; + good_srcs++; + if (good_srcs > 1) + break; + } + } + BUG_ON(good_srcs == 0); + p = blocks[disks-2]; q = blocks[disks-1]; @@ -414,14 +452,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, * Use the dead data page as temporary storage for delta q */ dq = blocks[faila]; - blocks[faila] = (void *)raid6_empty_zero_page; + blocks[faila] = NULL; blocks[disks-1] = dq; - /* in the 4 disk case we only need to perform a single source - * multiplication + /* in the 4-disk case we only need to perform a single source + * multiplication with the one good data block. */ - if (disks == 4) { - int good = faila == 0 ? 
1 : 0; + if (good_srcs == 1) { struct page *g = blocks[good]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index f9cdf04fe7c..7f2c00a4520 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH - BUG(); - #endif - /* first check to see if we can still append to depend_tx */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent && depend_tx->chan == tx->chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { + txd_chain(depend_tx, tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); /* attached dependency, flush the parent channel */ if (!intr_tx) { @@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, if (intr_tx) { intr_tx->callback = NULL; intr_tx->callback_param = NULL; - tx->parent = intr_tx; - /* safe to set ->next outside the lock since we know we are + /* safe to chain outside the lock since we know we are * not submitted yet */ - intr_tx->next = tx; + txd_chain(intr_tx, tx); /* check if we need to append */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { - intr_tx->parent = depend_tx; - depend_tx->next = intr_tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { + txd_chain(depend_tx, intr_tx); async_tx_ack(intr_tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); if (intr_tx) { - intr_tx->parent = NULL; + txd_clear_parent(intr_tx); intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } @@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, * 2/ dependencies are 1:1 i.e. 
two transactions can * not depend on the same parent */ - BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || - tx->parent); + BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || + txd_parent(tx)); /* the lock prevents async_tx_run_dependencies from missing * the setting of ->next when ->parent != NULL */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { /* we have a parent so we can not submit directly * if we are staying on the same channel: append * else: channel switch */ if (depend_tx->chan == chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_chain(depend_tx, tx); s = ASYNC_TX_SUBMITTED; } else s = ASYNC_TX_CHANNEL_SWITCH; @@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, else s = ASYNC_TX_CHANNEL_SWITCH; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); switch (s) { case ASYNC_TX_SUBMITTED: @@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, async_tx_channel_switch(depend_tx, tx); break; case ASYNC_TX_DIRECT_SUBMIT: - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); break; } } else { - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); } diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index b459a9034aa..079ae8ca590 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, void *cb_param_orig = submit->cb_param; enum async_tx_flags flags_orig = submit->flags; enum dma_ctrl_flags dma_flags; - int xor_src_cnt; + int xor_src_cnt = 0; dma_addr_t dma_dest; /* map the dest bidrectional in case it is re-used as a source */ dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); for (i = 0; i < src_cnt; i++) { /* only map the dest once */ + if (!src_list[i]) + continue; if (unlikely(src_list[i] == dest)) { - dma_src[i] = dma_dest; + dma_src[xor_src_cnt++] = dma_dest; continue; } - dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, - len, DMA_TO_DEVICE); + dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, + len, DMA_TO_DEVICE); } + src_cnt = xor_src_cnt; while (src_cnt) { submit->flags = flags_orig; @@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit) { int i; - int xor_src_cnt; + int xor_src_cnt = 0; int src_off = 0; void *dest_buf; void **srcs; @@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, /* convert to buffer pointers */ for (i = 0; i < src_cnt; i++) - srcs[i] = page_address(src_list[i]) + offset; - + if (src_list[i]) + srcs[xor_src_cnt++] = page_address(src_list[i]) + offset; + src_cnt = xor_src_cnt; /* set destination address */ dest_buf = page_address(dest) + offset; @@ -230,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) memcmp(a, a + 4, len - 4) == 0); } +static inline struct dma_chan * +xor_val_chan(struct async_submit_ctl *submit, struct page *dest, + struct page **src_list, int src_cnt, size_t len) +{ + #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA + return NULL; + #endif + return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list, + src_cnt, len); +} + /** * async_xor_val - attempt a xor parity check with a dma engine. 
* @dest: destination page used if the xor is performed synchronously @@ -251,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) { - struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, - &dest, 1, src_list, - src_cnt, len); + struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t *dma_src = NULL; diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 3ec27c7e62e..c1321935ebc 100644 --- a/crypto/async_tx/raid6test.c +++ b/crypto/async_tx/raid6test.c @@ -20,6 +20,7 @@ * */ #include <linux/async_tx.h> +#include <linux/gfp.h> #include <linux/random.h> #undef pr @@ -214,6 +215,13 @@ static int raid6_test(void) err += test(4, &tests); if (NDISKS > 5) err += test(5, &tests); + /* the 11 and 12 disk cases are special for ioatdma (p-disabled + * q-continuation without extended descriptor) + */ + if (NDISKS > 12) { + err += test(11, &tests); + err += test(12, &tests); + } err += test(NDISKS, &tests); pr("\n"); diff --git a/crypto/authenc.c b/crypto/authenc.c index 4d6f49a5dae..a5a22cfcd07 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -46,6 +46,12 @@ struct authenc_request_ctx { char tail[]; }; +static void authenc_request_complete(struct aead_request *req, int err) +{ + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { @@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, crypto_aead_authsize(authenc), 1); out: - aead_request_complete(req, err); + authenc_request_complete(req, err); } static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) @@ -175,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + unsigned int cryptlen = req->cryptlen; if (err) goto out; @@ -190,11 +197,12 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, goto out; authsize = crypto_aead_authsize(authenc); + cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; + err = memcmp(ihash, ahreq->result, authsize) ? 
-EBADMSG : 0; if (err) goto out; @@ -203,12 +211,12 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, - req->cryptlen, req->iv); + cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); out: - aead_request_complete(req, err); + authenc_request_complete(req, err); } static void authenc_verify_ahash_done(struct crypto_async_request *areq, @@ -222,16 +230,18 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + unsigned int cryptlen = req->cryptlen; if (err) goto out; authsize = crypto_aead_authsize(authenc); + cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; + err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -240,12 +250,12 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, - req->cryptlen, req->iv); + cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); out: - aead_request_complete(req, err); + authenc_request_complete(req, err); } static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) @@ -379,18 +389,20 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req, err = crypto_authenc_genicv(areq, iv, 0); } - aead_request_complete(areq, err); + authenc_request_complete(areq, err); } static int crypto_authenc_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct ablkcipher_request *abreq = aead_request_ctx(req); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_ablkcipher *enc = ctx->enc; struct scatterlist *dst = req->dst; unsigned int cryptlen = req->cryptlen; - u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc); + struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + + ctx->reqoff); + u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); int err; ablkcipher_request_set_tfm(abreq, enc); @@ -418,7 +430,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, err = crypto_authenc_genicv(areq, greq->giv, 0); } - aead_request_complete(areq, err); + authenc_request_complete(areq, err); } static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) @@ -454,7 +466,7 @@ static int crypto_authenc_verify(struct aead_request *req, unsigned int authsize; areq_ctx->complete = authenc_verify_ahash_done; - areq_ctx->complete = authenc_verify_ahash_update_done; + areq_ctx->update_complete = authenc_verify_ahash_update_done; ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); if (IS_ERR(ohash)) @@ -464,7 +476,7 @@ static int crypto_authenc_verify(struct aead_request *req, ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; + return memcmp(ihash, ohash, authsize) ? 
-EBADMSG : 0; } static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, @@ -546,10 +558,6 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) if (IS_ERR(auth)) return PTR_ERR(auth); - ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + - crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) @@ -557,14 +565,19 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) ctx->auth = auth; ctx->enc = enc; - - tfm->crt_aead.reqsize = max_t(unsigned int, - crypto_ahash_reqsize(auth) + ctx->reqoff + - sizeof(struct authenc_request_ctx) + - sizeof(struct ahash_request), + + ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + + crypto_ahash_alignmask(auth), + crypto_ahash_alignmask(auth) + 1) + + crypto_ablkcipher_ivsize(enc); + + tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + + ctx->reqoff + + max_t(unsigned int, + crypto_ahash_reqsize(auth) + + sizeof(struct ahash_request), sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(enc) + - crypto_ablkcipher_ivsize(enc)); + crypto_ablkcipher_reqsize(enc)); return 0; @@ -603,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_AHASH_MASK); if (IS_ERR(auth)) - return ERR_PTR(PTR_ERR(auth)); + return ERR_CAST(auth); auth_base = &auth->base; diff --git a/crypto/blowfish.c b/crypto/blowfish.c index 6f5b4873192..a67d52ee058 100644 --- a/crypto/blowfish.c +++ b/crypto/blowfish.c @@ -1,4 +1,4 @@ -/* +/* * Cryptographic API. * * Blowfish Cipher Algorithm, by Bruce Schneier. @@ -299,7 +299,7 @@ static const u32 bf_sbox[256 * 4] = { 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, }; -/* +/* * Round loop unrolling macros, S is a pointer to a S-Box array * organized in 4 unsigned longs at a row. */ @@ -315,7 +315,7 @@ static const u32 bf_sbox[256 * 4] = { /* * The blowfish encipher, processes 64-bit blocks. - * NOTE: This function MUSTN'T respect endianess + * NOTE: This function MUSTN'T respect endianess */ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) { @@ -395,7 +395,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) out_blk[1] = cpu_to_be32(yl); } -/* +/* * Calculates the blowfish S and P boxes for encryption and decryption. */ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) @@ -417,10 +417,10 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) /* Actual subkey generation */ for (j = 0, i = 0; i < 16 + 2; i++) { - temp = (((u32 )key[j] << 24) | - ((u32 )key[(j + 1) % keylen] << 16) | - ((u32 )key[(j + 2) % keylen] << 8) | - ((u32 )key[(j + 3) % keylen])); + temp = (((u32)key[j] << 24) | + ((u32)key[(j + 1) % keylen] << 16) | + ((u32)key[(j + 2) % keylen] << 8) | + ((u32)key[(j + 3) % keylen])); P[i] = P[i] ^ temp; j = (j + 4) % keylen; @@ -444,7 +444,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) S[count + 1] = data[1]; } } - + /* Bruce says not to bother with the weak key check. 
*/ return 0; } diff --git a/crypto/camellia.c b/crypto/camellia.c index 964635d163f..64cff46ea5e 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -39,271 +39,271 @@ #include <asm/unaligned.h> static const u32 camellia_sp1110[256] = { - 0x70707000,0x82828200,0x2c2c2c00,0xececec00, - 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500, - 0xe4e4e400,0x85858500,0x57575700,0x35353500, - 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100, - 0x23232300,0xefefef00,0x6b6b6b00,0x93939300, - 0x45454500,0x19191900,0xa5a5a500,0x21212100, - 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00, - 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00, - 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00, - 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00, - 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00, - 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00, - 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00, - 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00, - 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600, - 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00, - 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600, - 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00, - 0x74747400,0x12121200,0x2b2b2b00,0x20202000, - 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900, - 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200, - 0x34343400,0x7e7e7e00,0x76767600,0x05050500, - 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100, - 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700, - 0x14141400,0x58585800,0x3a3a3a00,0x61616100, - 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00, - 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600, - 0x53535300,0x18181800,0xf2f2f200,0x22222200, - 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200, - 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100, - 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800, - 0x60606000,0xfcfcfc00,0x69696900,0x50505000, - 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00, - 0xa1a1a100,0x89898900,0x62626200,0x97979700, - 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500, - 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200, - 0x10101000,0xc4c4c400,0x00000000,0x48484800, - 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00, - 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00, - 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400, - 0x87878700,0x5c5c5c00,0x83838300,0x02020200, - 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300, - 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300, - 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200, - 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600, - 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00, - 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00, - 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00, - 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00, - 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00, - 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600, - 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900, - 0x78787800,0x98989800,0x06060600,0x6a6a6a00, - 0xe7e7e700,0x46464600,0x71717100,0xbababa00, - 0xd4d4d400,0x25252500,0xababab00,0x42424200, - 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00, - 0x72727200,0x07070700,0xb9b9b900,0x55555500, - 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00, - 0x36363600,0x49494900,0x2a2a2a00,0x68686800, - 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400, - 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00, - 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100, - 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400, - 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00, + 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, + 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500, + 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500, + 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100, + 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300, + 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100, + 
0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00, + 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00, + 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00, + 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00, + 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00, + 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00, + 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00, + 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00, + 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600, + 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00, + 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600, + 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00, + 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000, + 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900, + 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200, + 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500, + 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100, + 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700, + 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100, + 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00, + 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600, + 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200, + 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200, + 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100, + 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800, + 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000, + 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00, + 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700, + 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500, + 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200, + 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800, + 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00, + 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00, + 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400, + 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200, + 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300, + 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300, + 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200, + 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600, + 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00, + 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00, + 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00, + 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00, + 0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00, + 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600, + 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900, + 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00, + 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00, + 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200, + 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00, + 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500, + 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00, + 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800, + 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400, + 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00, + 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100, + 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400, + 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00, }; static const u32 camellia_sp0222[256] = { - 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9, - 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb, - 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a, - 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282, - 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727, - 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242, - 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c, - 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b, - 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f, - 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d, - 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe, - 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434, - 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595, 
- 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a, - 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad, - 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a, - 0x00171717,0x001a1a1a,0x00353535,0x00cccccc, - 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a, - 0x00e8e8e8,0x00242424,0x00565656,0x00404040, - 0x00e1e1e1,0x00636363,0x00090909,0x00333333, - 0x00bfbfbf,0x00989898,0x00979797,0x00858585, - 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a, - 0x00dadada,0x006f6f6f,0x00535353,0x00626262, - 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf, - 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2, - 0x00bdbdbd,0x00363636,0x00222222,0x00383838, - 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c, - 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444, - 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565, - 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323, - 0x00484848,0x00101010,0x00d1d1d1,0x00515151, - 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0, - 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa, - 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f, - 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b, - 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5, - 0x00202020,0x00898989,0x00000000,0x00909090, - 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7, - 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5, - 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929, - 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404, - 0x009b9b9b,0x00949494,0x00212121,0x00666666, - 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7, - 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5, - 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c, - 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676, - 0x00030303,0x002d2d2d,0x00dedede,0x00969696, - 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c, - 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919, - 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d, - 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d, - 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2, - 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4, - 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575, - 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484, - 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5, - 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa, - 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414, - 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0, - 0x00787878,0x00707070,0x00e3e3e3,0x00494949, - 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6, - 0x00777777,0x00939393,0x00868686,0x00838383, - 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9, - 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d, + 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9, + 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb, + 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a, + 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282, + 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727, + 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242, + 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c, + 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b, + 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f, + 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d, + 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe, + 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434, + 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595, + 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a, + 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad, + 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a, + 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc, + 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a, + 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040, + 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333, + 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585, + 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a, + 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262, + 
0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf, + 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2, + 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838, + 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c, + 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444, + 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565, + 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323, + 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151, + 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0, + 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa, + 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f, + 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b, + 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5, + 0x00202020, 0x00898989, 0x00000000, 0x00909090, + 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7, + 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5, + 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929, + 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404, + 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666, + 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7, + 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5, + 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c, + 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676, + 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696, + 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c, + 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919, + 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d, + 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d, + 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2, + 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4, + 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575, + 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484, + 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5, + 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa, + 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414, + 0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0, + 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949, + 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6, + 0x00777777, 0x00939393, 0x00868686, 0x00838383, + 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9, + 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d, }; static const u32 camellia_sp3033[256] = { - 0x38003838,0x41004141,0x16001616,0x76007676, - 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2, - 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a, - 0x75007575,0x06000606,0x57005757,0xa000a0a0, - 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9, - 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090, - 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727, - 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede, - 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7, - 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767, - 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf, - 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d, - 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565, - 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e, - 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b, - 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6, - 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333, - 0xfd00fdfd,0x66006666,0x58005858,0x96009696, - 0x3a003a3a,0x09000909,0x95009595,0x10001010, - 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc, - 0xef00efef,0x26002626,0xe500e5e5,0x61006161, - 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282, - 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898, - 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb, - 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0, - 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e, - 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b, - 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111, - 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959, - 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8, - 0x12001212,0x04000404,0x74007474,0x54005454, - 
0x30003030,0x7e007e7e,0xb400b4b4,0x28002828, - 0x55005555,0x68006868,0x50005050,0xbe00bebe, - 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb, - 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca, - 0x70007070,0xff00ffff,0x32003232,0x69006969, - 0x08000808,0x62006262,0x00000000,0x24002424, - 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded, - 0x45004545,0x81008181,0x73007373,0x6d006d6d, - 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a, - 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101, - 0xe600e6e6,0x25002525,0x48004848,0x99009999, - 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9, - 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171, - 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313, - 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d, - 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5, - 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717, - 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646, - 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747, - 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b, - 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac, - 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535, - 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d, - 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121, - 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d, - 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa, - 0x7c007c7c,0x77007777,0x56005656,0x05000505, - 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434, - 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252, - 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd, - 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0, - 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a, - 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f, + 0x38003838, 0x41004141, 0x16001616, 0x76007676, + 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2, + 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a, + 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0, + 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9, + 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090, + 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727, + 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede, + 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7, + 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767, + 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf, + 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d, + 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565, + 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e, + 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b, + 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6, + 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333, + 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696, + 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010, + 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc, + 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161, + 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282, + 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898, + 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb, + 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0, + 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e, + 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b, + 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111, + 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959, + 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8, + 0x12001212, 0x04000404, 0x74007474, 0x54005454, + 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828, + 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe, + 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb, + 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca, + 0x70007070, 0xff00ffff, 0x32003232, 0x69006969, + 0x08000808, 0x62006262, 0x00000000, 0x24002424, + 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded, + 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d, + 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a, + 
0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101, + 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999, + 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9, + 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171, + 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313, + 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d, + 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5, + 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717, + 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646, + 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747, + 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b, + 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac, + 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535, + 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d, + 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121, + 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d, + 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa, + 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505, + 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434, + 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252, + 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd, + 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0, + 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a, + 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f, }; static const u32 camellia_sp4404[256] = { - 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0, - 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae, - 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5, - 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092, - 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f, - 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b, - 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d, - 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c, - 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0, - 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084, - 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076, - 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004, - 0x14140014,0x3a3a003a,0xdede00de,0x11110011, - 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2, - 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a, - 0x24240024,0xe8e800e8,0x60600060,0x69690069, - 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062, - 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064, - 0x10100010,0x00000000,0xa3a300a3,0x75750075, - 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd, - 0x87870087,0x83830083,0xcdcd00cd,0x90900090, - 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf, - 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6, - 0x81810081,0x6f6f006f,0x13130013,0x63630063, - 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc, - 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4, - 0x78780078,0x06060006,0xe7e700e7,0x71710071, - 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d, - 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac, - 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1, - 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043, - 0x15150015,0xadad00ad,0x77770077,0x80800080, - 0x82820082,0xecec00ec,0x27270027,0xe5e500e5, - 0x85850085,0x35350035,0x0c0c000c,0x41410041, - 0xefef00ef,0x93930093,0x19190019,0x21210021, - 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd, - 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce, - 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a, - 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d, - 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d, - 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d, - 0x12120012,0x20200020,0xb1b100b1,0x99990099, - 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005, - 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7, - 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c, - 0x0f0f000f,0x16160016,0x18180018,0x22220022, - 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091, - 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050, - 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097, - 
0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2, - 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db, - 0x03030003,0xdada00da,0x3f3f003f,0x94940094, - 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033, - 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2, - 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b, - 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e, - 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e, - 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059, - 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba, - 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa, - 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a, - 0x49490049,0x68680068,0x38380038,0xa4a400a4, - 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1, - 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e, + 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0, + 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae, + 0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5, + 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092, + 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f, + 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b, + 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d, + 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c, + 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0, + 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084, + 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076, + 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004, + 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011, + 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2, + 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a, + 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069, + 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062, + 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064, + 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075, + 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd, + 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090, + 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf, + 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6, + 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063, + 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc, + 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4, + 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071, + 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d, + 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac, + 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1, + 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043, + 0x15150015, 0xadad00ad, 0x77770077, 0x80800080, + 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5, + 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041, + 0xefef00ef, 0x93930093, 0x19190019, 0x21210021, + 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd, + 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce, + 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a, + 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d, + 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d, + 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d, + 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099, + 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005, + 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7, + 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c, + 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022, + 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091, + 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050, + 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097, + 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2, + 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db, + 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094, + 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033, + 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2, + 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b, + 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e, + 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e, + 
0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059, + 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba, + 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa, + 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a, + 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4, + 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1, + 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e, }; @@ -344,7 +344,7 @@ static const u32 camellia_sp4404[256] = { lr = (lr << bits) + (rl >> (32 - bits)); \ rl = (rl << bits) + (rr >> (32 - bits)); \ rr = (rr << bits) + (w0 >> (32 - bits)); \ - } while(0) + } while (0) #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ do { \ @@ -354,7 +354,7 @@ static const u32 camellia_sp4404[256] = { lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ - } while(0) + } while (0) #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ do { \ @@ -373,7 +373,7 @@ static const u32 camellia_sp4404[256] = { yl ^= yr; \ yr = ror32(yr, 8); \ yr ^= yl; \ - } while(0) + } while (0) #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) @@ -835,7 +835,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) static void camellia_setup192(const unsigned char *key, u32 *subkey) { unsigned char kk[32]; - u32 krll, krlr, krrl,krrr; + u32 krll, krlr, krrl, krrr; memcpy(kk, key, 24); memcpy((unsigned char *)&krll, key+16, 4); @@ -865,7 +865,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) t1 |= lr; \ ll ^= t1; \ rr ^= rol32(t3, 1); \ - } while(0) + } while (0) #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ do { \ @@ -881,12 +881,12 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) ir ^= il ^ kr; \ yl ^= ir; \ yr ^= ror32(il, 8) ^ ir; \ - } while(0) + } while (0) /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) { - u32 il,ir,t0,t1; /* temporary variables */ + u32 il, ir, t0, t1; /* temporary variables */ /* pre whitening but absorb kw2 */ io[0] ^= SUBKEY_L(0); @@ -894,30 +894,30 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) /* main iteration */ #define ROUNDS(i) do { \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ - io[0],io[1],il,ir); \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ - io[0],io[1],il,ir); \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ - io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ + io[0], io[1], il, ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ + io[0], io[1], il, ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ + io[0], io[1], il, ir); \ } while (0) #define FLS(i) do { \ - CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ - SUBKEY_L(i + 
0),SUBKEY_R(i + 0), \ - SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ - t0,t1,il,ir); \ + CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ + SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ + SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ + t0, t1, il, ir); \ } while (0) ROUNDS(0); @@ -941,7 +941,7 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) { - u32 il,ir,t0,t1; /* temporary variables */ + u32 il, ir, t0, t1; /* temporary variables */ /* pre whitening but absorb kw2 */ io[0] ^= SUBKEY_L(i); @@ -949,30 +949,30 @@ static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) /* main iteration */ #define ROUNDS(i) do { \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ - io[0],io[1],il,ir); \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ - io[0],io[1],il,ir); \ - CAMELLIA_ROUNDSM(io[0],io[1], \ - SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ - io[2],io[3],il,ir); \ - CAMELLIA_ROUNDSM(io[2],io[3], \ - SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ - io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ + io[0], io[1], il, ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ + io[0], io[1], il, ir); \ + CAMELLIA_ROUNDSM(io[0], io[1], \ + SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ + io[2], io[3], il, ir); \ + CAMELLIA_ROUNDSM(io[2], io[3], \ + SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ + io[0], io[1], il, ir); \ } while (0) #define FLS(i) do { \ - CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ - SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ - SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ - t0,t1,il,ir); \ + CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ + SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ + SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ + t0, t1, il, ir); \ } while (0) if (i == 32) { diff --git a/crypto/cast5.c b/crypto/cast5.c index 8cbe28fa0e0..a1d2294b50a 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c @@ -569,12 +569,12 @@ static const u32 sb8[256] = { 0xeaee6801, 0x8db2a283, 0xea8bf59e }; -#define F1(D,m,r) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) -#define F2(D,m,r) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) -#define F3(D,m,r) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) +#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) +#define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) +#define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) @@ -694,7 +694,7 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) dst[1] = cpu_to_be32(l); } -static void key_schedule(u32 * x, u32 * z, u32 * k) +static void key_schedule(u32 *x, u32 *z, u32 *k) { #define xi(i) 
((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) diff --git a/crypto/cast6.c b/crypto/cast6.c index 007d02beed6..e0c15a6c7c3 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c @@ -11,7 +11,7 @@ * under the terms of GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA @@ -35,12 +35,12 @@ struct cast6_ctx { u8 Kr[12][4]; }; -#define F1(D,r,m) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) -#define F2(D,r,m) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) -#define F3(D,r,m) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ - (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) +#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) +#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) +#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \ + (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) static const u32 s1[256] = { 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, @@ -312,7 +312,7 @@ static const u32 s4[256] = { static const u32 Tm[24][8] = { { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, - 0x84c413be, 0xf39dff5f, 0x6277eb00 } , + 0x84c413be, 0xf39dff5f, 0x6277eb00 } , { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, @@ -369,7 +369,8 @@ static const u8 Tr[4][8] = { }; /* forward octave */ -static void W(u32 *key, unsigned int i) { +static void W(u32 *key, unsigned int i) +{ u32 I; key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); @@ -377,7 +378,7 @@ static void W(u32 *key, unsigned int i) { key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); - key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); + key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); } @@ -393,11 +394,11 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, if (key_len % 4 != 0) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; - } + } + + memset(p_key, 0, 32); + memcpy(p_key, in_key, key_len); - memset (p_key, 0, 32); - memcpy (p_key, in_key, key_len); - key[0] = be32_to_cpu(p_key[0]); /* A */ key[1] = be32_to_cpu(p_key[1]); /* B */ key[2] = be32_to_cpu(p_key[2]); /* C */ @@ -406,18 +407,16 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, key[5] = be32_to_cpu(p_key[5]); /* F */ key[6] = be32_to_cpu(p_key[6]); /* G */ key[7] = be32_to_cpu(p_key[7]); /* H */ - - for (i = 0; i < 12; i++) { - W (key, 2 * i); - W (key, 2 * i + 1); - + W(key, 2 * i); + W(key, 2 * i + 1); + c->Kr[i][0] = key[0] & 0x1f; c->Kr[i][1] = key[2] & 0x1f; c->Kr[i][2] = key[4] & 0x1f; c->Kr[i][3] = key[6] & 0x1f; - + c->Km[i][0] = key[7]; c->Km[i][1] = key[5]; c->Km[i][2] = key[3]; @@ -428,21 +427,23 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, } /*forward quad round*/ -static void Q 
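/*
 * (Aside, not in the patch: with block[0..3] written as A..D, a quad
 * round mixes the words through the three CAST f-functions, keyed by
 * one row of 32-bit masking keys Km and 5-bit rotation counts Kr:
 *
 *     Q:    C ^= F1(D); B ^= F2(C); A ^= F3(B); D ^= F1(A);
 *     QBAR: D ^= F1(A); A ^= F3(B); B ^= F2(C); C ^= F1(D);
 *
 * QBAR applies the same XOR stages in reverse order, making it Q's
 * exact inverse. The hunks here only reflow whitespace and brace
 * placement; the round structure is untouched.)
 */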
(u32 * block, u8 * Kr, u32 * Km) { +static void Q(u32 *block, u8 *Kr, u32 *Km) +{ u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); block[1] ^= F2(block[2], Kr[1], Km[1]); block[0] ^= F3(block[1], Kr[2], Km[2]); - block[3] ^= F1(block[0], Kr[3], Km[3]); + block[3] ^= F1(block[0], Kr[3], Km[3]); } /*reverse quad round*/ -static void QBAR (u32 * block, u8 * Kr, u32 * Km) { +static void QBAR(u32 *block, u8 *Kr, u32 *Km) +{ u32 I; - block[3] ^= F1(block[0], Kr[3], Km[3]); - block[0] ^= F3(block[1], Kr[2], Km[2]); - block[1] ^= F2(block[2], Kr[1], Km[1]); - block[2] ^= F1(block[3], Kr[0], Km[0]); + block[3] ^= F1(block[0], Kr[3], Km[3]); + block[0] ^= F3(block[1], Kr[2], Km[2]); + block[1] ^= F2(block[2], Kr[1], Km[1]); + block[2] ^= F1(block[3], Kr[0], Km[0]); } static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) @@ -451,64 +452,65 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 * Km; - u8 * Kr; + u32 *Km; + u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); block[2] = be32_to_cpu(src[2]); block[3] = be32_to_cpu(src[3]); - Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); - Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); - Km = c->Km[2]; Kr = c->Kr[2]; Q (block, Kr, Km); - Km = c->Km[3]; Kr = c->Kr[3]; Q (block, Kr, Km); - Km = c->Km[4]; Kr = c->Kr[4]; Q (block, Kr, Km); - Km = c->Km[5]; Kr = c->Kr[5]; Q (block, Kr, Km); - Km = c->Km[6]; Kr = c->Kr[6]; QBAR (block, Kr, Km); - Km = c->Km[7]; Kr = c->Kr[7]; QBAR (block, Kr, Km); - Km = c->Km[8]; Kr = c->Kr[8]; QBAR (block, Kr, Km); - Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); - Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); - Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); + Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); + Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); + Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km); + Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km); + Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km); + Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km); + Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km); + Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km); + Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km); + Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km); + Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); + Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); dst[0] = cpu_to_be32(block[0]); dst[1] = cpu_to_be32(block[1]); dst[2] = cpu_to_be32(block[2]); dst[3] = cpu_to_be32(block[3]); -} +} -static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { - struct cast6_ctx * c = crypto_tfm_ctx(tfm); +static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) +{ + struct cast6_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 * Km; - u8 * Kr; + u32 *Km; + u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); block[2] = be32_to_cpu(src[2]); block[3] = be32_to_cpu(src[3]); - Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); - Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); - Km = c->Km[9]; Kr = c->Kr[9]; Q (block, Kr, Km); - Km = c->Km[8]; Kr = c->Kr[8]; Q (block, Kr, Km); - Km = c->Km[7]; Kr = c->Kr[7]; Q (block, Kr, Km); - Km = c->Km[6]; Kr = c->Kr[6]; Q (block, Kr, Km); - Km = c->Km[5]; Kr = c->Kr[5]; QBAR (block, Kr, Km); - Km = c->Km[4]; Kr = c->Kr[4]; QBAR (block, Kr, Km); - Km = c->Km[3]; Kr = 
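/*
 * (Aside: because Q and QBAR are mutual inverses, decryption simply
 * replays the schedule backwards, running Q with rounds 11..6 and then
 * QBAR with rounds 5..0, mirroring encryption's Q(0..5) + QBAR(6..11).
 * The changes in this hunk are again purely stylistic: "Q (block"
 * becomes "Q(block" and "u32 * Km" becomes "u32 *Km".)
 */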
c->Kr[3]; QBAR (block, Kr, Km); - Km = c->Km[2]; Kr = c->Kr[2]; QBAR (block, Kr, Km); - Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); - Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); - + Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); + Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); + Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km); + Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km); + Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km); + Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km); + Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km); + Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km); + Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km); + Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km); + Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); + Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); + dst[0] = cpu_to_be32(block[0]); dst[1] = cpu_to_be32(block[1]); dst[2] = cpu_to_be32(block[2]); dst[3] = cpu_to_be32(block[3]); -} +} static struct crypto_alg alg = { .cra_name = "cast6", diff --git a/crypto/cipher.c b/crypto/cipher.c index 9a1a7316eea..39541e0e537 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -8,7 +8,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ diff --git a/crypto/compress.c b/crypto/compress.c index 1ee357085d3..c33f0763a95 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -7,7 +7,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) ops->cot_compress = crypto_compress; ops->cot_decompress = crypto_decompress; - + return 0; } diff --git a/crypto/crc32c.c b/crypto/crc32c.c index 973bc2cfab2..de9e55c2979 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -1,4 +1,4 @@ -/* +/* * Cryptographic API. * * CRC32C chksum @@ -30,7 +30,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ @@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length) } /* - * Steps through buffer one byte at at time, calculates reflected + * Steps through buffer one byte at at time, calculates reflected * crc using table. 
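 *
 * (Editorial sketch, not in the patch: for a reflected CRC such as
 * CRC32C the per-byte step is the classic
 *
 *     crc = (crc >> 8) ^ table[(crc ^ *data++) & 0xff];
 *
 * with the register pre- and post-inverted by the callers; the hunk
 * below appears to change only trailing whitespace in this comment.)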
*/ diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 35335825a4e..ef71318976c 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -31,7 +31,7 @@ struct cryptd_cpu_queue { }; struct cryptd_queue { - struct cryptd_cpu_queue *cpu_queue; + struct cryptd_cpu_queue __percpu *cpu_queue; }; struct cryptd_instance_ctx { @@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, struct cryptd_cpu_queue *cpu_queue; cpu = get_cpu(); - cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); + cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); put_cpu(); @@ -711,6 +711,13 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) } EXPORT_SYMBOL_GPL(cryptd_ahash_child); +struct shash_desc *cryptd_shash_desc(struct ahash_request *req) +{ + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + return &rctx->desc; +} +EXPORT_SYMBOL_GPL(cryptd_shash_desc); + void cryptd_free_ahash(struct cryptd_ahash *tfm) { crypto_free_ahash(&tfm->base); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index cb71c9122bc..07a8a96d46f 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -1,11 +1,11 @@ -/* +/* * Cryptographic API. * * Null algorithms, aka Much Ado About Nothing. * * These are needed for IPsec, and may be useful in general for * testing & debugging. - * + * * The null cipher is compliant with RFC2410. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> @@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null"); static int __init crypto_null_mod_init(void) { int ret = 0; - + ret = crypto_register_alg(&cipher_null); if (ret < 0) goto out; @@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void) if (ret < 0) goto out_unregister_digest; -out: +out: return ret; out_unregister_digest: diff --git a/crypto/ctr.c b/crypto/ctr.c index 6c3bfabb9d1..4ca7222cfeb 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) - return ERR_PTR(PTR_ERR(alg)); + return ERR_CAST(alg); /* Block size must be >= 4 bytes. */ err = -EINVAL; diff --git a/crypto/deflate.c b/crypto/deflate.c index 9128da44e95..463dc859aa0 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -1,14 +1,14 @@ -/* +/* * Cryptographic API. * * Deflate algorithm (RFC 1951), implemented here primarily for use * by IPCOMP (RFC 3173 & RFC 2394). * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> - * + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
* * FIXME: deflate transforms will require up to a total of about 436k of kernel @@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) struct z_stream_s *stream = &ctx->comp_stream; stream->workspace = vmalloc(zlib_deflate_workspacesize()); - if (!stream->workspace ) { + if (!stream->workspace) { ret = -ENOMEM; goto out; } @@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) ret = -EINVAL; goto out_free; } -out: +out: return ret; out_free: vfree(stream->workspace); @@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx) struct z_stream_s *stream = &ctx->decomp_stream; stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); - if (!stream->workspace ) { + if (!stream->workspace) { ret = -ENOMEM; goto out; } @@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); int ret; - + ret = deflate_comp_init(ctx); if (ret) goto out; @@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, out: return ret; } - + static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - + int ret = 0; struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct z_stream_s *stream = &dctx->decomp_stream; @@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, if (ret == Z_OK && !stream->avail_in && stream->avail_out) { u8 zerostuff = 0; stream->next_in = &zerostuff; - stream->avail_in = 1; + stream->avail_in = 1; ret = zlib_inflate(stream, Z_FINISH); } if (ret != Z_STREAM_END) { diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 5bd3ee345a6..249f903cc45 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) - { + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/crypto/digest.c b/crypto/digest.c deleted file mode 100644 index 5d3f1303da9..00000000000 --- a/crypto/digest.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Cryptographic API. - * - * Digest operations. - * - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - */ - -#include <crypto/internal/hash.h> -#include <crypto/scatterwalk.h> -#include <linux/mm.h> -#include <linux/errno.h> -#include <linux/hardirq.h> -#include <linux/highmem.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/scatterlist.h> - -#include "internal.h" - -static int init(struct hash_desc *desc) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); - - tfm->__crt_alg->cra_digest.dia_init(tfm); - return 0; -} - -static int update2(struct hash_desc *desc, - struct scatterlist *sg, unsigned int nbytes) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); - unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); - - if (!nbytes) - return 0; - - for (;;) { - struct page *pg = sg_page(sg); - unsigned int offset = sg->offset; - unsigned int l = sg->length; - - if (unlikely(l > nbytes)) - l = nbytes; - nbytes -= l; - - do { - unsigned int bytes_from_page = min(l, ((unsigned int) - (PAGE_SIZE)) - - offset); - char *src = crypto_kmap(pg, 0); - char *p = src + offset; - - if (unlikely(offset & alignmask)) { - unsigned int bytes = - alignmask + 1 - (offset & alignmask); - bytes = min(bytes, bytes_from_page); - tfm->__crt_alg->cra_digest.dia_update(tfm, p, - bytes); - p += bytes; - bytes_from_page -= bytes; - l -= bytes; - } - tfm->__crt_alg->cra_digest.dia_update(tfm, p, - bytes_from_page); - crypto_kunmap(src, 0); - crypto_yield(desc->flags); - offset = 0; - pg++; - l -= bytes_from_page; - } while (l > 0); - - if (!nbytes) - break; - sg = scatterwalk_sg_next(sg); - } - - return 0; -} - -static int update(struct hash_desc *desc, - struct scatterlist *sg, unsigned int nbytes) -{ - if (WARN_ON_ONCE(in_irq())) - return -EDEADLK; - return update2(desc, sg, nbytes); -} - -static int final(struct hash_desc *desc, u8 *out) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); - unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); - struct digest_alg *digest = &tfm->__crt_alg->cra_digest; - - if (unlikely((unsigned long)out & alignmask)) { - unsigned long align = alignmask + 1; - unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); - u8 *dst = (u8 *)ALIGN(addr, align) + - ALIGN(tfm->__crt_alg->cra_ctxsize, align); - - digest->dia_final(tfm, dst); - memcpy(out, dst, digest->dia_digestsize); - } else - digest->dia_final(tfm, out); - - return 0; -} - -static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen) -{ - crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK); - return -ENOSYS; -} - -static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(hash); - - crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK); - return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen); -} - -static int digest(struct hash_desc *desc, - struct scatterlist *sg, unsigned int nbytes, u8 *out) -{ - if (WARN_ON_ONCE(in_irq())) - return -EDEADLK; - - init(desc); - update2(desc, sg, nbytes); - return final(desc, out); -} - -int crypto_init_digest_ops(struct crypto_tfm *tfm) -{ - struct hash_tfm *ops = &tfm->crt_hash; - struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - - if (dalg->dia_digestsize > PAGE_SIZE / 8) - return -EINVAL; - - ops->init = init; - ops->update = update; - ops->final = final; - ops->digest = digest; - ops->setkey = dalg->dia_setkey ? 
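/* (editorial note, not in the patch: the ternary below picks a stub
 * that just returns -ENOSYS for algorithms without a dia_setkey hook.
 * The whole file is being deleted because nothing uses the legacy
 * digest interface any more; hashes are now wired up through the
 * shash/ahash types instead.) */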
setkey : nosetkey; - ops->digestsize = dalg->dia_digestsize; - - return 0; -} - -void crypto_exit_digest_ops(struct crypto_tfm *tfm) -{ -} - -static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key, - unsigned int keylen) -{ - crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); - return -ENOSYS; -} - -static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); - struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - - crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); - return dalg->dia_setkey(tfm, key, keylen); -} - -static int digest_async_init(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - - dalg->dia_init(tfm); - return 0; -} - -static int digest_async_update(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - update(&desc, req->src, req->nbytes); - return 0; -} - -static int digest_async_final(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - final(&desc, req->result); - return 0; -} - -static int digest_async_digest(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - return digest(&desc, req->src, req->nbytes, req->result); -} - -int crypto_init_digest_ops_async(struct crypto_tfm *tfm) -{ - struct ahash_tfm *crt = &tfm->crt_ahash; - struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - - if (dalg->dia_digestsize > PAGE_SIZE / 8) - return -EINVAL; - - crt->init = digest_async_init; - crt->update = digest_async_update; - crt->final = digest_async_final; - crt->digest = digest_async_digest; - crt->setkey = dalg->dia_setkey ? 
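/* (the async front-end below follows the same pattern; it only wrapped
 * the synchronous dia_* hooks in a hash_desc, so with real ahash
 * implementations available this adapter is dead code as well.) */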
digest_async_setkey : - digest_async_nosetkey; - crt->digestsize = dalg->dia_digestsize; - - return 0; -} diff --git a/crypto/ecb.c b/crypto/ecb.c index a46838e98a7..935cfef4aa8 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc, do { fn(crypto_cipher_tfm(tfm), wdst, wsrc); - + wsrc += bsize; wdst += bsize; } while ((nbytes -= bsize) >= bsize); diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index b82d61f4e26..c33107e340b 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -60,13 +60,13 @@ do { \ u32 t = lo & ((1 << n) - 1); \ lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ hi = (hi >> n) | (t << (24-n)); \ -} while(0) +} while (0) /* Rotate right one 64 bit number as a 56 bit number */ #define ror56_64(k, n) \ do { \ k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ -} while(0) +} while (0) /* * Sboxes for Feistel network derived from @@ -228,7 +228,7 @@ do { \ union lc4 { __be32 l; u8 c[4]; } u; \ u.l = sched ^ R; \ L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ -} while(0) +} while (0) /* * encryptor diff --git a/crypto/gcm.c b/crypto/gcm.c index 5fc3292483e..2f5fbba6576 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -37,10 +37,23 @@ struct crypto_rfc4106_ctx { u8 nonce[4]; }; +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + u8 auth_tag[16]; + struct scatterlist cipher[1]; + struct scatterlist payload[2]; + struct scatterlist assoc[2]; + struct aead_request subreq; +}; + struct crypto_gcm_ghash_ctx { unsigned int cryptlen; struct scatterlist *src; - crypto_completion_t complete; + void (*complete)(struct aead_request *req, int err); }; struct crypto_gcm_req_priv_ctx { @@ -267,23 +280,26 @@ static int gcm_hash_final(struct aead_request *req, return crypto_ahash_final(ahreq); } -static void gcm_hash_final_done(struct crypto_async_request *areq, - int err) +static void __gcm_hash_final_done(struct aead_request *req, int err) { - struct aead_request *req = areq->data; struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; if (!err) crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); - gctx->complete(areq, err); + gctx->complete(req, err); } -static void gcm_hash_len_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_final_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; + + __gcm_hash_final_done(req, err); +} + +static void __gcm_hash_len_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); if (!err) { @@ -292,13 +308,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq, return; } - gcm_hash_final_done(areq, err); + __gcm_hash_final_done(req, err); } -static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_len_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; + + __gcm_hash_len_done(req, err); +} + +static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); if (!err) { @@ -307,13 +328,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, return; } - gcm_hash_len_done(areq, err); + __gcm_hash_len_done(req, err); } -static void gcm_hash_crypt_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_crypt_remain_done(struct 
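/*
 * (Editorial note on the pattern applied throughout this file's hunks:
 * each completion handler is split into a worker that takes the
 * aead_request directly and a thin async wrapper, roughly
 *
 *     static void __step_done(struct aead_request *req, int err)
 *     { ...do the work, then call the next __*_done()...; }
 *
 *     static void step_done(struct crypto_async_request *areq, int err)
 *     { __step_done(areq->data, err); }
 *
 * so synchronous completion paths can chain the __ workers without
 * forging async requests; gctx->complete changes type accordingly,
 * from crypto_completion_t to a function taking the aead_request.)
 */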
crypto_async_request *areq, + int err) { struct aead_request *req = areq->data; + + __gcm_hash_crypt_remain_done(req, err); +} + +static void __gcm_hash_crypt_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int remain; @@ -327,13 +354,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq, return; } - gcm_hash_crypt_remain_done(areq, err); + __gcm_hash_crypt_remain_done(req, err); } -static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; + + __gcm_hash_crypt_done(req, err); +} + +static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; crypto_completion_t complete; @@ -350,15 +382,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, } if (remain) - gcm_hash_crypt_done(areq, err); + __gcm_hash_crypt_done(req, err); else - gcm_hash_crypt_remain_done(areq, err); + __gcm_hash_crypt_remain_done(req, err); } -static void gcm_hash_assoc_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, + int err) { struct aead_request *req = areq->data; + + __gcm_hash_assoc_remain_done(req, err); +} + +static void __gcm_hash_assoc_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); unsigned int remain; @@ -371,13 +409,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq, return; } - gcm_hash_assoc_remain_done(areq, err); + __gcm_hash_assoc_remain_done(req, err); } -static void gcm_hash_init_done(struct crypto_async_request *areq, - int err) +static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; + + __gcm_hash_assoc_done(req, err); +} + +static void __gcm_hash_init_done(struct aead_request *req, int err) +{ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); crypto_completion_t complete; unsigned int remain = 0; @@ -393,9 +436,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq, } if (remain) - gcm_hash_assoc_done(areq, err); + __gcm_hash_assoc_done(req, err); else - gcm_hash_assoc_remain_done(areq, err); + __gcm_hash_assoc_remain_done(req, err); +} + +static void gcm_hash_init_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + + __gcm_hash_init_done(req, err); } static int gcm_hash(struct aead_request *req, @@ -457,10 +507,8 @@ static void gcm_enc_copy_hash(struct aead_request *req, crypto_aead_authsize(aead), 1); } -static void gcm_enc_hash_done(struct crypto_async_request *areq, - int err) +static void gcm_enc_hash_done(struct aead_request *req, int err) { - struct aead_request *req = areq->data; struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); if (!err) @@ -469,8 +517,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq, aead_request_complete(req, err); } -static void gcm_encrypt_done(struct crypto_async_request *areq, - int err) +static void gcm_encrypt_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); @@ -479,9 +526,13 @@ static void 
gcm_encrypt_done(struct crypto_async_request *areq, err = gcm_hash(req, pctx); if (err == -EINPROGRESS || err == -EBUSY) return; + else if (!err) { + crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); + gcm_enc_copy_hash(req, pctx); + } } - gcm_enc_hash_done(areq, err); + aead_request_complete(req, err); } static int crypto_gcm_encrypt(struct aead_request *req) @@ -538,9 +589,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err) aead_request_complete(req, err); } -static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) +static void gcm_dec_hash_done(struct aead_request *req, int err) { - struct aead_request *req = areq->data; struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ablkcipher_request *abreq = &pctx->u.abreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; @@ -552,9 +602,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) err = crypto_ablkcipher_decrypt(abreq); if (err == -EINPROGRESS || err == -EBUSY) return; + else if (!err) + err = crypto_gcm_verify(req, pctx); } - gcm_decrypt_done(areq, err); + aead_request_complete(req, err); } static int crypto_gcm_decrypt(struct aead_request *req) @@ -1008,6 +1060,272 @@ static struct crypto_template crypto_rfc4106_tmpl = { .module = THIS_MODULE, }; +static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx( + struct aead_request *req) +{ + unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); +} + +static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); + struct crypto_aead *child = ctx->child; + int err; + + if (keylen < 4) + return -EINVAL; + + keylen -= 4; + memcpy(ctx->nonce, key + keylen, 4); + + crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_aead_setkey(child, key, keylen); + crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); + + if (authsize != 16) + return -EINVAL; + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +/* this is the same as crypto_authenc_chain */ +static void crypto_rfc4543_chain(struct scatterlist *head, + struct scatterlist *sg, int chain) +{ + if (chain) { + head->length += sg->length; + sg = scatterwalk_sg_next(sg); + } + + if (sg) + scatterwalk_sg_chain(head, 2, sg); + else + sg_mark_end(head); +} + +static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, + int enc) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); + struct aead_request *subreq = &rctx->subreq; + struct scatterlist *dst = req->dst; + struct scatterlist *cipher = rctx->cipher; + struct scatterlist *payload = rctx->payload; + struct scatterlist *assoc = rctx->assoc; + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int assoclen = req->assoclen; + struct page *dstp; + u8 *vdst; + u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), + crypto_aead_alignmask(ctx->child) + 1); + + memcpy(iv, ctx->nonce, 4); + memcpy(iv + 4, req->iv, 8); + + /* construct 
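 * (aside, not in the patch text: rfc4543 is GMAC, authentication-only
 * GCM. The data to be protected is chained into the inner request's
 * assoc list further down, while the inner crypt region is only this
 * 16-byte tag buffer, zeroed on encryption or copied from the tail of
 * dst on decryption, so the child gcm transform authenticates
 * everything and encrypts nothing beyond producing the tag.)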
cipher/plaintext */ + if (enc) + memset(rctx->auth_tag, 0, authsize); + else + scatterwalk_map_and_copy(rctx->auth_tag, dst, + req->cryptlen - authsize, + authsize, 0); + + sg_init_one(cipher, rctx->auth_tag, authsize); + + /* construct the aad */ + dstp = sg_page(dst); + vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; + + sg_init_table(payload, 2); + sg_set_buf(payload, req->iv, 8); + crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8); + assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); + + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); + crypto_rfc4543_chain(assoc, payload, 0); + + aead_request_set_tfm(subreq, ctx->child); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv); + aead_request_set_assoc(subreq, assoc, assoclen); + + return subreq; +} + +static int crypto_rfc4543_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); + struct aead_request *subreq; + int err; + + subreq = crypto_rfc4543_crypt(req, 1); + err = crypto_aead_encrypt(subreq); + if (err) + return err; + + scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen, + crypto_aead_authsize(aead), 1); + + return 0; +} + +static int crypto_rfc4543_decrypt(struct aead_request *req) +{ + req = crypto_rfc4543_crypt(req, 0); + + return crypto_aead_decrypt(req); +} + +static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aead *aead; + unsigned long align; + + aead = crypto_spawn_aead(spawn); + if (IS_ERR(aead)) + return PTR_ERR(aead); + + ctx->child = aead; + + align = crypto_aead_alignmask(aead); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) + + ALIGN(crypto_aead_reqsize(aead), + crypto_tfm_ctx_alignment()) + + align + 16; + + return 0; +} + +static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_aead_spawn *spawn; + struct crypto_alg *alg; + const char *ccm_name; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + + ccm_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(ccm_name); + if (IS_ERR(ccm_name)) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = crypto_instance_ctx(inst); + crypto_set_aead_spawn(spawn, inst); + err = crypto_grab_aead(spawn, ccm_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + alg = crypto_aead_spawn_alg(spawn); + + err = -EINVAL; + + /* We only support 16-byte blocks. */ + if (alg->cra_aead.ivsize != 16) + goto out_drop_alg; + + /* Not a stream cipher? 
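 * (editorial aside: together with the ivsize check above, this is a
 * heuristic that the wrapped algorithm looks like GCM, i.e. a byte
 * granular AEAD driven by a full 16-byte counter block; rfc4543 is
 * only defined on top of such a mode.)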
*/ + if (alg->cra_blocksize != 1) + goto out_drop_alg; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc4543(%s)", alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_nivaead_type; + + inst->alg.cra_aead.ivsize = 8; + inst->alg.cra_aead.maxauthsize = 16; + + inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); + + inst->alg.cra_init = crypto_rfc4543_init_tfm; + inst->alg.cra_exit = crypto_rfc4543_exit_tfm; + + inst->alg.cra_aead.setkey = crypto_rfc4543_setkey; + inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize; + inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt; + inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt; + + inst->alg.cra_aead.geniv = "seqiv"; + +out: + return inst; + +out_drop_alg: + crypto_drop_aead(spawn); +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static void crypto_rfc4543_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_rfc4543_tmpl = { + .name = "rfc4543", + .alloc = crypto_rfc4543_alloc, + .free = crypto_rfc4543_free, + .module = THIS_MODULE, +}; + static int __init crypto_gcm_module_init(void) { int err; @@ -1028,8 +1346,14 @@ static int __init crypto_gcm_module_init(void) if (err) goto out_undo_gcm; + err = crypto_register_template(&crypto_rfc4543_tmpl); + if (err) + goto out_undo_rfc4106; + return 0; +out_undo_rfc4106: + crypto_unregister_template(&crypto_rfc4106_tmpl); out_undo_gcm: crypto_unregister_template(&crypto_gcm_tmpl); out_undo_base: @@ -1042,6 +1366,7 @@ out: static void __exit crypto_gcm_module_exit(void) { kfree(gcm_zeroes); + crypto_unregister_template(&crypto_rfc4543_tmpl); crypto_unregister_template(&crypto_rfc4106_tmpl); crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl); @@ -1055,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); MODULE_ALIAS("gcm_base"); MODULE_ALIAS("rfc4106"); +MODULE_ALIAS("rfc4543"); diff --git a/crypto/hash.c b/crypto/hash.c deleted file mode 100644 index cb86b19fd10..00000000000 --- a/crypto/hash.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Cryptographic Hash operations. - * - * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- */ - -#include <crypto/internal/hash.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/seq_file.h> - -#include "internal.h" - -static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type, - u32 mask) -{ - return alg->cra_ctxsize; -} - -static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(crt); - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - unsigned long alignmask = crypto_hash_alignmask(crt); - int ret; - u8 *buffer, *alignbuffer; - unsigned long absize; - - absize = keylen + alignmask; - buffer = kmalloc(absize, GFP_ATOMIC); - if (!buffer) - return -ENOMEM; - - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - ret = alg->setkey(crt, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); - return ret; -} - -static int hash_setkey(struct crypto_hash *crt, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_hash_tfm(crt); - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - unsigned long alignmask = crypto_hash_alignmask(crt); - - if ((unsigned long)key & alignmask) - return hash_setkey_unaligned(crt, key, keylen); - - return alg->setkey(crt, key, keylen); -} - -static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); - struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm); - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - - return alg->setkey(tfm_hash, key, keylen); -} - -static int hash_async_init(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - return alg->init(&desc); -} - -static int hash_async_update(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - return alg->update(&desc, req->src, req->nbytes); -} - -static int hash_async_final(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - return alg->final(&desc, req->result); -} - -static int hash_async_digest(struct ahash_request *req) -{ - struct crypto_tfm *tfm = req->base.tfm; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - struct hash_desc desc = { - .tfm = __crypto_hash_cast(tfm), - .flags = req->base.flags, - }; - - return alg->digest(&desc, req->src, req->nbytes, req->result); -} - -static int crypto_init_hash_ops_async(struct crypto_tfm *tfm) -{ - struct ahash_tfm *crt = &tfm->crt_ahash; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - - crt->init = hash_async_init; - crt->update = hash_async_update; - crt->final = hash_async_final; - crt->digest = hash_async_digest; - crt->setkey = hash_async_setkey; - crt->digestsize = alg->digestsize; - - return 0; -} - -static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm) -{ - struct hash_tfm *crt = &tfm->crt_hash; - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - - crt->init = alg->init; - crt->update = alg->update; - crt->final = alg->final; - crt->digest = alg->digest; - 
crt->setkey = hash_setkey; - crt->digestsize = alg->digestsize; - - return 0; -} - -static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) -{ - struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - - if (alg->digestsize > PAGE_SIZE / 8) - return -EINVAL; - - if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK) - return crypto_init_hash_ops_async(tfm); - else - return crypto_init_hash_ops_sync(tfm); -} - -static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) - __attribute__ ((unused)); -static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) -{ - seq_printf(m, "type : hash\n"); - seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); - seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); -} - -const struct crypto_type crypto_hash_type = { - .ctxsize = crypto_hash_ctxsize, - .init = crypto_init_hash_ops, -#ifdef CONFIG_PROC_FS - .show = crypto_hash_show, -#endif -}; -EXPORT_SYMBOL_GPL(crypto_hash_type); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Generic cryptographic hash type"); diff --git a/crypto/hmac.c b/crypto/hmac.c index 15c2eb53454..8d9544cf816 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -23,7 +23,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> -#include <linux/slab.h> #include <linux/string.h> struct hmac_ctx { diff --git a/crypto/internal.h b/crypto/internal.h index 2d226362e59..d4384b08ab2 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -6,7 +6,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
* */ diff --git a/crypto/md5.c b/crypto/md5.c index 83eb5296175..30efc7dad89 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -16,17 +16,13 @@ * */ #include <crypto/internal/hash.h> +#include <crypto/md5.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> -#define MD5_DIGEST_SIZE 16 -#define MD5_HMAC_BLOCK_SIZE 64 -#define MD5_BLOCK_WORDS 16 -#define MD5_HASH_WORDS 4 - #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) @@ -35,12 +31,6 @@ #define MD5STEP(f, w, x, y, z, in, s) \ (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) -struct md5_ctx { - u32 hash[MD5_HASH_WORDS]; - u32 block[MD5_BLOCK_WORDS]; - u64 byte_count; -}; - static void md5_transform(u32 *hash, u32 const *in) { u32 a, b, c, d; @@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words) } } -static inline void md5_transform_helper(struct md5_ctx *ctx) +static inline void md5_transform_helper(struct md5_state *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); md5_transform(ctx->hash, ctx->block); @@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx) static int md5_init(struct shash_desc *desc) { - struct md5_ctx *mctx = shash_desc_ctx(desc); + struct md5_state *mctx = shash_desc_ctx(desc); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; @@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc) static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_ctx *mctx = shash_desc_ctx(desc); + struct md5_state *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; @@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) static int md5_final(struct shash_desc *desc, u8 *out) { - struct md5_ctx *mctx = shash_desc_ctx(desc); + struct md5_state *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); @@ -220,12 +210,31 @@ static int md5_final(struct shash_desc *desc, u8 *out) return 0; } +static int md5_export(struct shash_desc *desc, void *out) +{ + struct md5_state *ctx = shash_desc_ctx(desc); + + memcpy(out, ctx, sizeof(*ctx)); + return 0; +} + +static int md5_import(struct shash_desc *desc, const void *in) +{ + struct md5_state *ctx = shash_desc_ctx(desc); + + memcpy(ctx, in, sizeof(*ctx)); + return 0; +} + static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_init, .update = md5_update, .final = md5_final, - .descsize = sizeof(struct md5_ctx), + .export = md5_export, + .import = md5_import, + .descsize = sizeof(struct md5_state), + .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_flags = CRYPTO_ALG_TYPE_SHASH, diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c new file mode 100644 index 00000000000..de3078215fe --- /dev/null +++ b/crypto/pcrypt.c @@ -0,0 +1,567 @@ +/* + * pcrypt - Parallel crypto wrapper. + * + * Copyright (C) 2009 secunet Security Networks AG + * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <crypto/algapi.h> +#include <crypto/internal/aead.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/kobject.h> +#include <linux/cpu.h> +#include <crypto/pcrypt.h> + +struct padata_pcrypt { + struct padata_instance *pinst; + struct workqueue_struct *wq; + + /* + * Cpumask for callback CPUs. It should be + * equal to the serial cpumask of the corresponding padata instance, + * so it is updated when padata notifies us about serial + * cpumask change. + * + * cb_cpumask is protected by RCU. This fact prevents us from + * using cpumask_var_t directly because the actual type of + * cpumask_var_t depends on the kernel configuration (particularly on + * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration, + * cpumask_var_t may be either a pointer to the struct cpumask + * or a variable allocated on the stack. Thus we cannot safely use + * cpumask_var_t with RCU operations such as rcu_assign_pointer or + * rcu_dereference. So cpumask_var_t is wrapped in struct + * pcrypt_cpumask, which makes it possible to use it with RCU. + */ + struct pcrypt_cpumask { + cpumask_var_t mask; + } *cb_cpumask; + struct notifier_block nblock; +}; + +static struct padata_pcrypt pencrypt; +static struct padata_pcrypt pdecrypt; +static struct kset *pcrypt_kset; + +struct pcrypt_instance_ctx { + struct crypto_spawn spawn; + unsigned int tfm_count; +}; + +struct pcrypt_aead_ctx { + struct crypto_aead *child; + unsigned int cb_cpu; +}; + +static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, + struct padata_pcrypt *pcrypt) +{ + unsigned int cpu_index, cpu, i; + struct pcrypt_cpumask *cpumask; + + cpu = *cb_cpu; + + rcu_read_lock_bh(); + cpumask = rcu_dereference(pcrypt->cb_cpumask); + if (cpumask_test_cpu(cpu, cpumask->mask)) + goto out; + + if (!cpumask_weight(cpumask->mask)) + goto out; + + cpu_index = cpu % cpumask_weight(cpumask->mask); + + cpu = cpumask_first(cpumask->mask); + for (i = 0; i < cpu_index; i++) + cpu = cpumask_next(cpu, cpumask->mask); + + *cb_cpu = cpu; + +out: + rcu_read_unlock_bh(); + return padata_do_parallel(pcrypt->pinst, padata, cpu); +} + +static int pcrypt_aead_setkey(struct crypto_aead *parent, + const u8 *key, unsigned int keylen) +{ + struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); + + return crypto_aead_setkey(ctx->child, key, keylen); +} + +static int pcrypt_aead_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +static void pcrypt_aead_serial(struct padata_priv *padata) +{ + struct pcrypt_request *preq = pcrypt_padata_request(padata); + struct aead_request *req = pcrypt_request_ctx(preq); + + aead_request_complete(req->base.data, padata->info); +} + +static void pcrypt_aead_giv_serial(struct padata_priv *padata) +{ + struct pcrypt_request *preq = pcrypt_padata_request(padata); + struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); + + 
aead_request_complete(req->areq.base.data, padata->info); +} + +static void pcrypt_aead_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct pcrypt_request *preq = aead_request_ctx(req); + struct padata_priv *padata = pcrypt_request_padata(preq); + + padata->info = err; + req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + padata_do_serial(padata); +} + +static void pcrypt_aead_enc(struct padata_priv *padata) +{ + struct pcrypt_request *preq = pcrypt_padata_request(padata); + struct aead_request *req = pcrypt_request_ctx(preq); + + padata->info = crypto_aead_encrypt(req); + + if (padata->info == -EINPROGRESS) + return; + + padata_do_serial(padata); +} + +static int pcrypt_aead_encrypt(struct aead_request *req) +{ + int err; + struct pcrypt_request *preq = aead_request_ctx(req); + struct aead_request *creq = pcrypt_request_ctx(preq); + struct padata_priv *padata = pcrypt_request_padata(preq); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); + u32 flags = aead_request_flags(req); + + memset(padata, 0, sizeof(struct padata_priv)); + + padata->parallel = pcrypt_aead_enc; + padata->serial = pcrypt_aead_serial; + + aead_request_set_tfm(creq, ctx->child); + aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, + pcrypt_aead_done, req); + aead_request_set_crypt(creq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_assoc(creq, req->assoc, req->assoclen); + + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); + if (!err) + return -EINPROGRESS; + + return err; +} + +static void pcrypt_aead_dec(struct padata_priv *padata) +{ + struct pcrypt_request *preq = pcrypt_padata_request(padata); + struct aead_request *req = pcrypt_request_ctx(preq); + + padata->info = crypto_aead_decrypt(req); + + if (padata->info == -EINPROGRESS) + return; + + padata_do_serial(padata); +} + +static int pcrypt_aead_decrypt(struct aead_request *req) +{ + int err; + struct pcrypt_request *preq = aead_request_ctx(req); + struct aead_request *creq = pcrypt_request_ctx(preq); + struct padata_priv *padata = pcrypt_request_padata(preq); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); + u32 flags = aead_request_flags(req); + + memset(padata, 0, sizeof(struct padata_priv)); + + padata->parallel = pcrypt_aead_dec; + padata->serial = pcrypt_aead_serial; + + aead_request_set_tfm(creq, ctx->child); + aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, + pcrypt_aead_done, req); + aead_request_set_crypt(creq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_assoc(creq, req->assoc, req->assoclen); + + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); + if (!err) + return -EINPROGRESS; + + return err; +} + +static void pcrypt_aead_givenc(struct padata_priv *padata) +{ + struct pcrypt_request *preq = pcrypt_padata_request(padata); + struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); + + padata->info = crypto_aead_givencrypt(req); + + if (padata->info == -EINPROGRESS) + return; + + padata_do_serial(padata); +} + +static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) +{ + int err; + struct aead_request *areq = &req->areq; + struct pcrypt_request *preq = aead_request_ctx(areq); + struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq); + struct padata_priv *padata = pcrypt_request_padata(preq); + struct crypto_aead *aead = aead_givcrypt_reqtfm(req); + struct 
pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); + u32 flags = aead_request_flags(areq); + + memset(padata, 0, sizeof(struct padata_priv)); + + padata->parallel = pcrypt_aead_givenc; + padata->serial = pcrypt_aead_giv_serial; + + aead_givcrypt_set_tfm(creq, ctx->child); + aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, + pcrypt_aead_done, areq); + aead_givcrypt_set_crypt(creq, areq->src, areq->dst, + areq->cryptlen, areq->iv); + aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); + aead_givcrypt_set_giv(creq, req->giv, req->seq); + + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); + if (!err) + return -EINPROGRESS; + + return err; +} + +static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) +{ + int cpu, cpu_index; + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst); + struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aead *cipher; + + ictx->tfm_count++; + + cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask); + + ctx->cb_cpu = cpumask_first(cpu_active_mask); + for (cpu = 0; cpu < cpu_index; cpu++) + ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask); + + cipher = crypto_spawn_aead(crypto_instance_ctx(inst)); + + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) + + sizeof(struct aead_givcrypt_request) + + crypto_aead_reqsize(cipher); + + return 0; +} + +static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) +{ + struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) +{ + struct crypto_instance *inst; + struct pcrypt_instance_ctx *ctx; + int err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) { + inst = ERR_PTR(-ENOMEM); + goto out; + } + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto out_free_inst; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + + ctx = crypto_instance_ctx(inst); + err = crypto_init_spawn(&ctx->spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK); + if (err) + goto out_free_inst; + + inst->alg.cra_priority = alg->cra_priority + 100; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + +out: + return inst; + +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, + u32 type, u32 mask) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); + if (IS_ERR(alg)) + return ERR_CAST(alg); + + inst = pcrypt_alloc_instance(alg); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; + inst->alg.cra_type = &crypto_aead_type; + + inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; + inst->alg.cra_aead.geniv = alg->cra_aead.geniv; + inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; + + inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); + + inst->alg.cra_init = pcrypt_aead_init_tfm; + inst->alg.cra_exit = pcrypt_aead_exit_tfm; + + inst->alg.cra_aead.setkey = pcrypt_aead_setkey; + inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize; + inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt; + inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; + 
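For a sense of how the instances built by pcrypt_alloc_aead() get used: a consumer wraps an existing AEAD name in the pcrypt() template and lets the crypto API instantiate it. A hedged sketch follows; the function name and the inner algorithm string are only examples.

#include <linux/crypto.h>
#include <linux/err.h>

/* Ask the crypto API for a parallelized AEAD; "pcrypt(...)" is resolved
 * through the template registered by this file. */
static struct crypto_aead *sketch_alloc_pcrypt_aead(void)
{
	return crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
}

Requests submitted on such a tfm are dispatched through padata and complete on the callback CPUs selected as shown earlier.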
inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_CAST(algt); + + switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_AEAD: + return pcrypt_alloc_aead(tb, algt->type, algt->mask); + } + + return ERR_PTR(-EINVAL); +} + +static void pcrypt_free(struct crypto_instance *inst) +{ + struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->spawn); + kfree(inst); +} + +static int pcrypt_cpumask_change_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct padata_pcrypt *pcrypt; + struct pcrypt_cpumask *new_mask, *old_mask; + struct padata_cpumask *cpumask = (struct padata_cpumask *)data; + + if (!(val & PADATA_CPU_SERIAL)) + return 0; + + pcrypt = container_of(self, struct padata_pcrypt, nblock); + new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); + if (!new_mask) + return -ENOMEM; + if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { + kfree(new_mask); + return -ENOMEM; + } + + old_mask = pcrypt->cb_cpumask; + + cpumask_copy(new_mask->mask, cpumask->cbcpu); + rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); + synchronize_rcu_bh(); + + free_cpumask_var(old_mask->mask); + kfree(old_mask); + return 0; +} + +static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) +{ + int ret; + + pinst->kobj.kset = pcrypt_kset; + ret = kobject_add(&pinst->kobj, NULL, name); + if (!ret) + kobject_uevent(&pinst->kobj, KOBJ_ADD); + + return ret; +} + +static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, + const char *name) +{ + int ret = -ENOMEM; + struct pcrypt_cpumask *mask; + + get_online_cpus(); + + pcrypt->wq = create_workqueue(name); + if (!pcrypt->wq) + goto err; + + pcrypt->pinst = padata_alloc_possible(pcrypt->wq); + if (!pcrypt->pinst) + goto err_destroy_workqueue; + + mask = kmalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) + goto err_free_padata; + if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { + kfree(mask); + goto err_free_padata; + } + + cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask); + rcu_assign_pointer(pcrypt->cb_cpumask, mask); + + pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; + ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + if (ret) + goto err_free_cpumask; + + ret = pcrypt_sysfs_add(pcrypt->pinst, name); + if (ret) + goto err_unregister_notifier; + + put_online_cpus(); + + return ret; + +err_unregister_notifier: + padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); +err_free_cpumask: + free_cpumask_var(mask->mask); + kfree(mask); +err_free_padata: + padata_free(pcrypt->pinst); +err_destroy_workqueue: + destroy_workqueue(pcrypt->wq); +err: + put_online_cpus(); + + return ret; +} + +static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) +{ + kobject_put(&pcrypt->pinst->kobj); + free_cpumask_var(pcrypt->cb_cpumask->mask); + kfree(pcrypt->cb_cpumask); + + padata_stop(pcrypt->pinst); + padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + destroy_workqueue(pcrypt->wq); + padata_free(pcrypt->pinst); +} + +static struct crypto_template pcrypt_tmpl = { + .name = "pcrypt", + .alloc = pcrypt_alloc, + .free = pcrypt_free, + .module = THIS_MODULE, +}; + +static int __init pcrypt_init(void) +{ + int err = -ENOMEM; + + pcrypt_kset = kset_create_and_add("pcrypt", NULL, 
kernel_kobj); + if (!pcrypt_kset) + goto err; + + err = pcrypt_init_padata(&pencrypt, "pencrypt"); + if (err) + goto err_unreg_kset; + + err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); + if (err) + goto err_deinit_pencrypt; + + padata_start(pencrypt.pinst); + padata_start(pdecrypt.pinst); + + return crypto_register_template(&pcrypt_tmpl); + +err_deinit_pencrypt: + pcrypt_fini_padata(&pencrypt); +err_unreg_kset: + kset_unregister(pcrypt_kset); +err: + return err; +} + +static void __exit pcrypt_exit(void) +{ + pcrypt_fini_padata(&pencrypt); + pcrypt_fini_padata(&pdecrypt); + + kset_unregister(pcrypt_kset); + crypto_unregister_template(&pcrypt_tmpl); +} + +module_init(pcrypt_init); +module_exit(pcrypt_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); +MODULE_DESCRIPTION("Parallel crypto wrapper"); diff --git a/crypto/proc.c b/crypto/proc.c index 5dc07e442fc..58fef67d4f4 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -25,28 +25,22 @@ #ifdef CONFIG_CRYPTO_FIPS static struct ctl_table crypto_sysctl_table[] = { { - .ctl_name = CTL_UNNUMBERED, .procname = "fips_enabled", .data = &fips_enabled, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = 0, + .proc_handler = proc_dointvec }, + {} }; static struct ctl_table crypto_dir_table[] = { { - .ctl_name = CTL_UNNUMBERED, .procname = "crypto", .mode = 0555, .child = crypto_sysctl_table }, - { - .ctl_name = 0, - }, + {} }; static struct ctl_table_header *crypto_sysctls; @@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; - - case CRYPTO_ALG_TYPE_DIGEST: - seq_printf(m, "type : digest\n"); - seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); - seq_printf(m, "digestsize : %u\n", - alg->cra_digest.dia_digestsize); - break; case CRYPTO_ALG_TYPE_COMPRESS: seq_printf(m, "type : compression\n"); break; diff --git a/crypto/rng.c b/crypto/rng.c index ba05e7380e7..f93cb531118 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/random.h> #include <linux/seq_file.h> +#include <linux/slab.h> #include <linux/string.h> static DEFINE_MUTEX(crypto_default_rng_lock); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 3de89a42440..41e529af077 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, void scatterwalk_done(struct scatter_walk *walk, int out, int more) { - if (!offset_in_page(walk->offset) || !more) + if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) scatterwalk_pagedone(walk, out, more); } EXPORT_SYMBOL_GPL(scatterwalk_done); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 5a013a8bf87..4c449122941 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> diff --git a/crypto/shash.c b/crypto/shash.c index 91f7b9d8388..22fd9433141 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, u8 *buffer, *alignbuffer; int err; - absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); + absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); buffer = kmalloc(absize, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c 
index aa3f84ccc78..3ca68f9fc14 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -18,8 +18,8 @@ #include <crypto/hash.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/gfp.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/string.h> #include <linux/moduleparam.h> @@ -394,6 +394,17 @@ out: return 0; } +static void test_hash_sg_init(struct scatterlist *sg) +{ + int i; + + sg_init_table(sg, TVMEMSIZE); + for (i = 0; i < TVMEMSIZE; i++) { + sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); + memset(tvmem[i], 0xff, PAGE_SIZE); + } +} + static void test_hash_speed(const char *algo, unsigned int sec, struct hash_speed *speed) { @@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, goto out; } - sg_init_table(sg, TVMEMSIZE); - for (i = 0; i < TVMEMSIZE; i++) { - sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); - memset(tvmem[i], 0xff, PAGE_SIZE); - } - + test_hash_sg_init(sg); for (i = 0; speed[i].blen != 0; i++) { if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { printk(KERN_ERR @@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, goto out; } + if (speed[i].klen) + crypto_hash_setkey(tfm, tvmem[0], speed[i].klen); + printk(KERN_INFO "test%3u " "(%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); @@ -458,6 +467,250 @@ out: crypto_free_hash(tfm); } +struct tcrypt_result { + struct completion completion; + int err; +}; + +static void tcrypt_complete(struct crypto_async_request *req, int err) +{ + struct tcrypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static inline int do_one_ahash_op(struct ahash_request *req, int ret) +{ + if (ret == -EINPROGRESS || ret == -EBUSY) { + struct tcrypt_result *tr = req->base.data; + + ret = wait_for_completion_interruptible(&tr->completion); + if (!ret) + ret = tr->err; + INIT_COMPLETION(tr->completion); + } + return ret; +} + +static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, + char *out, int sec) +{ + unsigned long start, end; + int bcount; + int ret; + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + return ret; + } + + printk("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return 0; +} + +static int test_ahash_jiffies(struct ahash_request *req, int blen, + int plen, char *out, int sec) +{ + unsigned long start, end; + int bcount, pcount; + int ret; + + if (plen == blen) + return test_ahash_jiffies_digest(req, blen, out, sec); + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = crypto_ahash_init(req); + if (ret) + return ret; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + return ret; + } + /* we assume there is enough space in 'out' for the result */ + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + return ret; + } + + pr_cont("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return 0; +} + +static int test_ahash_cycles_digest(struct ahash_request *req, int blen, + char *out) +{ + unsigned long cycles = 0; + int ret, i; + + /* Warm-up run. 
*/ + for (i = 0; i < 4; i++) { + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + goto out; + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + goto out; + + end = get_cycles(); + + cycles += end - start; + } + +out: + if (ret) + return ret; + + pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return 0; +} + +static int test_ahash_cycles(struct ahash_request *req, int blen, + int plen, char *out) +{ + unsigned long cycles = 0; + int i, pcount, ret; + + if (plen == blen) + return test_ahash_cycles_digest(req, blen, out); + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + ret = crypto_ahash_init(req); + if (ret) + goto out; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + goto out; + } + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + goto out; + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + + ret = crypto_ahash_init(req); + if (ret) + goto out; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + goto out; + } + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + goto out; + + end = get_cycles(); + + cycles += end - start; + } + +out: + if (ret) + return ret; + + pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return 0; +} + +static void test_ahash_speed(const char *algo, unsigned int sec, + struct hash_speed *speed) +{ + struct scatterlist sg[TVMEMSIZE]; + struct tcrypt_result tresult; + struct ahash_request *req; + struct crypto_ahash *tfm; + static char output[1024]; + int i, ret; + + printk(KERN_INFO "\ntesting speed of async %s\n", algo); + + tfm = crypto_alloc_ahash(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + return; + } + + if (crypto_ahash_digestsize(tfm) > sizeof(output)) { + pr_err("digestsize(%u) > outputbuffer(%zu)\n", + crypto_ahash_digestsize(tfm), sizeof(output)); + goto out; + } + + test_hash_sg_init(sg); + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("ahash request allocation failure\n"); + goto out; + } + + init_completion(&tresult.completion); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &tresult); + + for (i = 0; speed[i].blen != 0; i++) { + if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { + pr_err("template (%u) too big for tvmem (%lu)\n", + speed[i].blen, TVMEMSIZE * PAGE_SIZE); + break; + } + + pr_info("test%3u " + "(%5u byte blocks,%5u bytes per update,%4u updates): ", + i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); + + ahash_request_set_crypt(req, sg, output, speed[i].plen); + + if (sec) + ret = test_ahash_jiffies(req, speed[i].blen, + speed[i].plen, output, sec); + else + ret = test_ahash_cycles(req, speed[i].blen, + speed[i].plen, output); + + if (ret) { + pr_err("hashing failed ret=%d\n", ret); + break; + } + } + + ahash_request_free(req); + +out: + crypto_free_ahash(tfm); +} + static void test_available(void) { char **name = check; @@ -881,9 +1134,87 @@ static int do_test(int m) test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 318: + test_hash_speed("ghash-generic", sec, hash_speed_template_16); + if (mode > 300 
&& mode < 400) break; + case 399: break; + case 400: + /* fall through */ + + case 401: + test_ahash_speed("md4", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 402: + test_ahash_speed("md5", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 403: + test_ahash_speed("sha1", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 404: + test_ahash_speed("sha256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 405: + test_ahash_speed("sha384", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 406: + test_ahash_speed("sha512", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 407: + test_ahash_speed("wp256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 408: + test_ahash_speed("wp384", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 409: + test_ahash_speed("wp512", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 410: + test_ahash_speed("tgr128", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 411: + test_ahash_speed("tgr160", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 412: + test_ahash_speed("tgr192", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 413: + test_ahash_speed("sha224", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 414: + test_ahash_speed("rmd128", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 415: + test_ahash_speed("rmd160", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 416: + test_ahash_speed("rmd256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 417: + test_ahash_speed("rmd320", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 499: + break; + case 1000: test_available(); break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 966bbfaf95b..10cb925132c 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -25,6 +25,7 @@ struct cipher_speed_template { struct hash_speed { unsigned int blen; /* buffer length */ unsigned int plen; /* per-update length */ + unsigned int klen; /* key length */ }; /* @@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = { { .blen = 0, .plen = 0, } }; +static struct hash_speed hash_speed_template_16[] = { + { .blen = 16, .plen = 16, .klen = 16, }, + { .blen = 64, .plen = 16, .klen = 16, }, + { .blen = 64, .plen = 64, .klen = 16, }, + { .blen = 256, .plen = 16, .klen = 16, }, + { .blen = 256, .plen = 64, .klen = 16, }, + { .blen = 256, .plen = 256, .klen = 16, }, + { .blen = 1024, .plen = 16, .klen = 16, }, + { .blen = 1024, .plen = 256, .klen = 16, }, + { .blen = 1024, .plen = 1024, .klen = 16, }, + { .blen = 2048, .plen = 16, .klen = 16, }, + { .blen = 2048, .plen = 256, .klen = 16, }, + { .blen = 2048, .plen = 1024, .klen = 16, }, + { .blen = 2048, .plen = 2048, .klen = 16, }, + { .blen = 4096, .plen = 16, .klen = 16, }, + { .blen = 4096, .plen = 256, .klen = 16, }, + { .blen = 4096, .plen = 1024, .klen = 16, }, + { .blen = 4096, .plen = 4096, .klen = 16, }, + { .blen = 8192, .plen = 16, .klen = 16, }, + { .blen = 8192, .plen = 256, .klen = 16, }, + { .blen = 8192, .plen = 1024, .klen = 16, }, + { .blen = 8192, .plen = 4096, .klen = 16, }, 
+ { .blen = 8192, .plen = 8192, .klen = 16, }, + + /* End marker */ + { .blen = 0, .plen = 0, .klen = 0, } +}; + #endif /* _CRYPTO_TCRYPT_H */ diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6d5b746637b..abd980c729e 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -22,6 +22,17 @@ #include <crypto/rng.h> #include "internal.h" + +#ifndef CONFIG_CRYPTO_MANAGER_TESTS + +/* a perfect nop */ +int alg_test(const char *driver, const char *alg, u32 type, u32 mask) +{ + return 0; +} + +#else + #include "testmgr.h" /* @@ -153,8 +164,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) free_page((unsigned long)buf[i]); } +static int do_one_async_hash_op(struct ahash_request *req, + struct tcrypt_result *tr, + int ret) +{ + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = wait_for_completion_interruptible(&tr->completion); + if (!ret) + ret = tr->err; + INIT_COMPLETION(tr->completion); + } + return ret; +} + static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, - unsigned int tcount) + unsigned int tcount, bool use_digest) { const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); unsigned int i, j, k, temp; @@ -206,23 +230,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, } ahash_request_set_crypt(req, sg, result, template[i].psize); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible( - &tresult.completion); - if (!ret && !(ret = tresult.err)) { - INIT_COMPLETION(tresult.completion); - break; + if (use_digest) { + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_digest(req)); + if (ret) { + pr_err("alg: hash: digest failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + } else { + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_init(req)); + if (ret) { + pr_err("alg: hash: init failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_update(req)); + if (ret) { + pr_err("alg: hash: update failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_final(req)); + if (ret) { + pr_err("alg: hash: final failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; } - /* fall through */ - default: - printk(KERN_ERR "alg: hash: digest failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; } if (memcmp(result, template[i].digest, @@ -1201,7 +1238,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, unsigned int tcount) { const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); - int err, i, j, seedsize; + int err = 0, i, j, seedsize; u8 *seed; char result[32]; @@ -1402,7 +1439,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, return PTR_ERR(tfm); } - err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); + err = test_hash(tfm, desc->suite.hash.vecs, + desc->suite.hash.count, true); + if (!err) + err = test_hash(tfm, desc->suite.hash.vecs, + desc->suite.hash.count, false); crypto_free_ahash(tfm); return err; @@ -1477,9 +1518,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, return err; } +static int alg_test_null(const struct alg_test_desc *desc, + const char *driver, u32 type, u32 mask) +{ + return 0; +} + /* Please keep this list sorted by algorithm name. 
*/ static const struct alg_test_desc alg_test_descs[] = { { + .alg = "__driver-cbc-aes-aesni", + .test = alg_test_null, + .suite = { + .cipher = { + .enc = { + .vecs = NULL, + .count = 0 + }, + .dec = { + .vecs = NULL, + .count = 0 + } + } + } + }, { + .alg = "__driver-ecb-aes-aesni", + .test = alg_test_null, + .suite = { + .cipher = { + .enc = { + .vecs = NULL, + .count = 0 + }, + .dec = { + .vecs = NULL, + .count = 0 + } + } + } + }, { + .alg = "__ghash-pclmulqdqni", + .test = alg_test_null, + .suite = { + .hash = { + .vecs = NULL, + .count = 0 + } + } + }, { .alg = "ansi_cprng", .test = alg_test_cprng, .fips_allowed = 1, @@ -1623,6 +1709,30 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "cryptd(__driver-ecb-aes-aesni)", + .test = alg_test_null, + .suite = { + .cipher = { + .enc = { + .vecs = NULL, + .count = 0 + }, + .dec = { + .vecs = NULL, + .count = 0 + } + } + } + }, { + .alg = "cryptd(__ghash-pclmulqdqni)", + .test = alg_test_null, + .suite = { + .hash = { + .vecs = NULL, + .count = 0 + } + } + }, { .alg = "ctr(aes)", .test = alg_test_skcipher, .fips_allowed = 1, @@ -1669,6 +1779,21 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "ecb(__aes-aesni)", + .test = alg_test_null, + .suite = { + .cipher = { + .enc = { + .vecs = NULL, + .count = 0 + }, + .dec = { + .vecs = NULL, + .count = 0 + } + } + } + }, { .alg = "ecb(aes)", .test = alg_test_skcipher, .fips_allowed = 1, @@ -1943,6 +2068,15 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "ghash", + .test = alg_test_hash, + .suite = { + .hash = { + .vecs = ghash_tv_template, + .count = GHASH_TEST_VECTORS + } + } + }, { .alg = "hmac(md5)", .test = alg_test_hash, .suite = { @@ -2407,4 +2541,7 @@ notest: non_fips_alg: return -EINVAL; } + +#endif /* CONFIG_CRYPTO_MANAGER_TESTS */ + EXPORT_SYMBOL_GPL(alg_test); diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 9963b18983a..74e35377fd3 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = { }, }; +#define GHASH_TEST_VECTORS 1 + +static struct hash_testvec ghash_tv_template[] = +{ + { + + .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61", + .ksize = 16, + .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", + .psize = 16, + .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" + "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", + }, +}; + /* * HMAC-MD5 test vectors from RFC2202 * (These need to be fixed to not use strlen). 
@@ -1654,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { } }; -#define VMAC_AES_TEST_VECTORS 1 -static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', +#define VMAC_AES_TEST_VECTORS 8 +static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',}; +static char vmac_string2[128] = {'a', 'b', 'c',}; +static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + }; + static struct hash_testvec aes_vmac128_tv_template[] = { { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = NULL, + .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54", + .psize = 0, + .ksize = 16, + }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = vmac_string, - .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", + .plaintext = vmac_string1, + .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1", + .psize = 128, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = vmac_string2, + .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d", + .psize = 128, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = vmac_string3, + .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = NULL, + .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84", + .psize = 0, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = vmac_string1, + .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = vmac_string2, + .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = vmac_string3, + .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4", .psize = 128, .ksize = 16, }, diff --git a/crypto/twofish.c b/crypto/twofish_generic.c index dfcda231f87..1f07b843e07 100644 --- a/crypto/twofish.c +++ b/crypto/twofish_generic.c @@ -212,3 +212,4 @@ module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); +MODULE_ALIAS("twofish"); diff --git a/crypto/vmac.c b/crypto/vmac.c index 0a9468e575d..0999274a27a 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ +#define pe64_to_cpup le64_to_cpup /* Prefer little endian */ + #ifdef __LITTLE_ENDIAN #define INDEX_HIGH 1 #define INDEX_LOW 0 @@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) @@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - 
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) @@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) @@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ - le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ - le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ - le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) @@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; \ rh = rl = t = 0; \ for (i = 0; i < nw; i += 2) { \ - t1 = le64_to_cpup(mp+i) + kp[i]; \ - t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ + t1 = pe64_to_cpup(mp+i) + kp[i]; \ + t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ m2 = MUL32(t1 >> 32, t2); \ m1 = 
MUL32(t1, t2 >> 32); \ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ @@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx) ctx->first_block_processed = 0; } -static u64 l3hash(u64 p1, u64 p2, - u64 k1, u64 k2, u64 len) +static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) { u64 rh, rl, t, z = 0; @@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes, } p = be64_to_cpup(out_p + i); h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); - return p + h; + return le64_to_cpu(p + h); } static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) @@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent, static int vmac_init(struct shash_desc *pdesc) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); return 0; } diff --git a/crypto/xor.c b/crypto/xor.c index fc5b836f343..b75182d8ab1 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -18,6 +18,7 @@ #define BH_TRACE 0 #include <linux/module.h> +#include <linux/gfp.h> #include <linux/raid/xor.h> #include <linux/jiffies.h> #include <asm/xor.h> diff --git a/crypto/xts.c b/crypto/xts.c index d87b0f3102c..555ecaab1e5 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb) alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) - return ERR_PTR(PTR_ERR(alg)); + return ERR_CAST(alg); inst = crypto_alloc_instance("xts", alg); if (IS_ERR(inst)) |
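Finally, on the xts.c change just above: ERR_CAST(alg) is the idiomatic spelling of ERR_PTR(PTR_ERR(alg)); both forward the same encoded error, ERR_CAST just skips the pointer-to-long-to-pointer round trip. A minimal illustration, with an invented function name:

#include <crypto/algapi.h>
#include <linux/err.h>

/* Propagate an error encoded in one pointer type through another. */
static struct crypto_instance *sketch_err_propagation(struct crypto_alg *alg)
{
	if (IS_ERR(alg))
		return ERR_CAST(alg);	/* same as ERR_PTR(PTR_ERR(alg)) */

	return NULL;	/* a real alloc() would build the instance here */
}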