author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 14:31:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 14:31:18 -0700
commit		6be48f2940af9ea8d93c23a0dd8e322672c92efd
tree		1bdc85a9d3fd0c19e108ea27a29a83ef2b44f5d0 /drivers/crypto/nx/nx-sha512.c
parent		0ffb01d9def22f1954e99529b7e4ded497b2e88b
parent		68411521cc6055edc6274e03ab3210a5893533ba
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.12:
- Added MODULE_SOFTDEP to allow pre-loading of modules (see the usage
  sketch after this message).
- Reinstated crct10dif driver using the module softdep feature.
- Allow the VIA RNG driver to be auto-loaded.
- Split large input data when necessary in nx.
- Handle zero length messages correctly for GCM/XCBC in nx.
- Handle SHA-2 chunks bigger than block size properly in nx.
- Handle unaligned lengths in omap-aes.
- Added SHA384/SHA512 to omap-sham.
- Added OMAP5/AM43XX SHAM support.
- Added OMAP4 TRNG support.
- Misc fixes"
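
For readers unfamiliar with the soft-dependency mechanism named in the first
bullet: MODULE_SOFTDEP lets a module ask the userspace module loader to load
other modules before ("pre:") or after ("post:") itself, without creating a
hard symbol dependency. A minimal sketch follows; the "demo" module and the
choice of crct10dif as the pre-dependency are illustrative, not taken from
this merge.

	/*
	 * Minimal sketch of MODULE_SOFTDEP usage ("demo" module and its
	 * chosen dependency are hypothetical). The macro records the
	 * dependency in .modinfo; modprobe then loads "crct10dif" before
	 * inserting this module.
	 */
	#include <linux/init.h>
	#include <linux/module.h>

	static int __init demo_init(void)
	{
		pr_info("demo: loaded after soft dependencies\n");
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);

	/* "pre:" loads listed modules first; "post:" loads them afterwards */
	MODULE_SOFTDEP("pre: crct10dif");
	MODULE_LICENSE("GPL");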
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
hwrng: via - Add MODULE_DEVICE_TABLE
crypto: fcrypt - Fix bitoperation for compilation with clang
crypto: nx - fix SHA-2 for chunks bigger than block size
crypto: nx - fix GCM for zero length messages
crypto: nx - fix XCBC for zero length messages
crypto: nx - fix limits to sg lists for AES-CCM
crypto: nx - fix limits to sg lists for AES-XCBC
crypto: nx - fix limits to sg lists for AES-GCM
crypto: nx - fix limits to sg lists for AES-CTR
crypto: nx - fix limits to sg lists for AES-CBC
crypto: nx - fix limits to sg lists for AES-ECB
crypto: nx - add offset to nx_build_sg_lists()
padata - Register hotcpu notifier after initialization
padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
hwrng: omap - reorder OMAP TRNG driver code
crypto: omap-sham - correct dma burst size
crypto: omap-sham - Enable Polling mode if DMA fails
crypto: tegra-aes - bitwise vs logical and
crypto: sahara - checking the wrong variable
...
Diffstat (limited to 'drivers/crypto/nx/nx-sha512.c')
-rw-r--r--	drivers/crypto/nx/nx-sha512.c | 131
1 file changed, 83 insertions(+), 48 deletions(-)
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 08eee112234..4ae5b0f221d 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 	struct nx_sg *in_sg;
-	u64 to_process, leftover, spbc_bits;
+	u64 to_process, leftover, total, spbc_bits;
+	u32 max_sg_len;
+	unsigned long irq_flags;
 	int rc = 0;
 
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-		/* we've hit the nx chip previously and we're updating again,
-		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha512.input_partial_digest,
-		       csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
-	}
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
 	/* 2 cases for total data len:
-	 *  1: <= SHA512_BLOCK_SIZE:	copy into state, return 0
-	 *  2: > SHA512_BLOCK_SIZE:	process X blocks, copy in leftover
+	 *  1: < SHA512_BLOCK_SIZE:	copy into state, return 0
+	 *  2: >= SHA512_BLOCK_SIZE:	process X blocks, copy in leftover
 	 */
-	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
+	total = sctx->count[0] + len;
+	if (total < SHA512_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count[0], data, len);
 		sctx->count[0] += len;
 		goto out;
 	}
 
-	/* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
-	 * update */
-	to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
-	leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
-
-	if (sctx->count[0]) {
-		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-					 sctx->count[0], nx_ctx->ap->sglen);
-		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+	in_sg = nx_ctx->in_sg;
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+
+	do {
+		/*
+		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+		 * this update. This value is also restricted by the sg list
+		 * limits.
+		 */
+		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+		leftover = total - to_process;
+
+		if (sctx->count[0]) {
+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+						 (u8 *) sctx->buf,
+						 sctx->count[0], max_sg_len);
+		}
+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 to_process - sctx->count[0],
-					 nx_ctx->ap->sglen);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-	} else {
-		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
-					 to_process, nx_ctx->ap->sglen);
+					 max_sg_len);
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
 					sizeof(struct nx_sg);
-	}
-
-	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+			/*
+			 * we've hit the nx chip previously and we're updating
+			 * again, so copy over the partial digest.
+			 */
+			memcpy(csbcpb->cpb.sha512.input_partial_digest,
+			       csbcpb->cpb.sha512.message_digest,
+			       SHA512_DIGEST_SIZE);
+		}
 
-	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (rc)
-		goto out;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		atomic_inc(&(nx_ctx->stats->sha512_ops));
+		spbc_bits = csbcpb->cpb.sha512.spbc * 8;
+		csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
+		if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
+			csbcpb->cpb.sha512.message_bit_length_hi++;
+
+		/* everything after the first update is continuation */
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	atomic_inc(&(nx_ctx->stats->sha512_ops));
+		total -= to_process;
+		data += to_process - sctx->count[0];
+		sctx->count[0] = 0;
+		in_sg = nx_ctx->in_sg;
+	} while (leftover >= SHA512_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
-		memcpy(sctx->buf, data + len - leftover, leftover);
+		memcpy(sctx->buf, data, leftover);
 	sctx->count[0] = leftover;
-
-	spbc_bits = csbcpb->cpb.sha512.spbc * 8;
-	csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
-	if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
-		csbcpb->cpb.sha512.message_bit_length_hi++;
-
-	/* everything after the first update is continuation */
-	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
 }
 
@@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 	struct nx_sg *in_sg, *out_sg;
+	u32 max_sg_len;
 	u64 count0;
+	unsigned long irq_flags;
 	int rc;
 
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
+
 	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
@@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		csbcpb->cpb.sha512.message_bit_length_hi++;
 
 	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
-				 nx_ctx->ap->sglen);
+				 max_sg_len);
 	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
-				  nx_ctx->ap->sglen);
+				  max_sg_len);
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
 }
 
@@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 	struct sha512_state *octx = out;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
 	/* move message_bit_length (128 bits) into count and convert its value
 	 * to bytes */
@@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
 		octx->state[7] = SHA512_H7;
 	}
 
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
@@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 	const struct sha512_state *ictx = in;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
 
 	sctx->count[0] = ictx->count[0] & 0x3f;
@@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	}
 
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
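
The heart of the diff above is the new do/while loop in nx_sha512_update():
rather than assuming an update fits in a single coprocessor operation,
to_process is clamped to the hardware limits (ap->databytelen and the
scatter/gather list capacity), rounded down to a whole number of
SHA512_BLOCK_SIZE blocks, and the loop repeats until less than a block of
input remains; the spin_lock_irqsave()/spin_unlock_irqrestore() pairs added
to every entry point serialize access to the shared hardware context. Below
is a minimal user-space sketch of that chunking arithmetic; HW_MAX_BYTES,
SG_MAX_BYTES, and process_blocks() are made-up stand-ins for the NX limits
and nx_hcall_sync(), only the loop mirrors the driver (the early
copy-into-state path for sub-block updates is omitted).

	/*
	 * User-space sketch of the chunking loop added to nx_sha512_update().
	 * BLOCK_SIZE matches SHA512_BLOCK_SIZE; HW_MAX_BYTES and SG_MAX_BYTES
	 * are stand-ins for ap->databytelen and NX_PAGE_SIZE * (max_sg_len - 1).
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE	128ULL		/* SHA512_BLOCK_SIZE */
	#define HW_MAX_BYTES	(1ULL << 20)	/* stand-in: ap->databytelen */
	#define SG_MAX_BYTES	(4096ULL * 32)	/* stand-in: sg list capacity */

	static uint64_t min_u64(uint64_t a, uint64_t b)
	{
		return a < b ? a : b;
	}

	/* Stand-in for one coprocessor op: consumes whole blocks only. */
	static void process_blocks(const uint8_t *data, uint64_t len)
	{
		(void)data;
		printf("hw op: %llu bytes (%llu blocks)\n",
		       (unsigned long long)len,
		       (unsigned long long)(len / BLOCK_SIZE));
	}

	/* Feed an arbitrarily large update through size-limited hw ops. */
	static void sha512_update_sketch(const uint8_t *data, uint64_t total)
	{
		uint64_t to_process, leftover;

		do {
			/* clamp to both limits, round down to full blocks */
			to_process = min_u64(total, HW_MAX_BYTES);
			to_process = min_u64(to_process, SG_MAX_BYTES);
			to_process &= ~(BLOCK_SIZE - 1);
			leftover = total - to_process;

			process_blocks(data, to_process);

			data += to_process;
			total -= to_process;
		} while (leftover >= BLOCK_SIZE);

		/* in the driver, leftover (< BLOCK_SIZE) stays in sctx->buf */
	}

	int main(void)
	{
		/* three full hardware ops plus 72 leftover bytes */
		static uint8_t buf[3 * 4096 * 32 + 72];

		memset(buf, 0xab, sizeof(buf));
		sha512_update_sketch(buf, sizeof(buf));
		return 0;
	}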