From 2021abaa00da64a4b98948c93bf31a55386cd2d0 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Tue, 14 Jan 2014 18:31:01 +0100 Subject: crypto: dcp - Move the AES operation type from actx to rctx Move the AES operation type and mode from the async crypto context to the crypto request context. This allows for recycling of the async crypto context for different kinds of operations. I found this problem when I used dm-crypt, which uses the same async crypto context (actx) for both encryption and decryption requests. Since the requests are enqueued into the processing queue, immediately storing the type of operation into the async crypto context (actx) caused corruption of this information when encryption and decryption operations followed immediately one after the other. When the first operation was dequeued, the second operation was already enqueued and had overwritten the type of operation in actx, thus causing an incorrect result for the first operation. Fix this problem by storing the type of operation into the crypto request context. Signed-off-by: Marek Vasut Cc: David S. Miller Cc: Fabio Estevam Cc: Shawn Guo Cc: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/mxs-dcp.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index a6db7fa6f89..56bde65ddad 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -83,13 +83,16 @@ struct dcp_async_ctx { unsigned int hot:1; /* Crypto-specific context */ - unsigned int enc:1; - unsigned int ecb:1; struct crypto_ablkcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; }; +struct dcp_aes_req_ctx { + unsigned int enc:1; + unsigned int ecb:1; +}; + struct dcp_sha_req_ctx { unsigned int init:1; unsigned int fini:1; @@ -190,10 +193,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) /* * Encryption (AES128) */ -static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) +static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, + struct ablkcipher_request *req, int init) { struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, @@ -212,14 +217,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) /* Payload contains the key. */ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; - if (actx->enc) + if (rctx->enc) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; if (init) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; - if (actx->ecb) + if (rctx->ecb) desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; else desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; @@ -247,6 +252,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) struct ablkcipher_request *req = ablkcipher_request_cast(arq); struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); struct scatterlist *dst = req->dst; struct scatterlist *src = req->src; @@ -271,7 +277,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) /* Copy the key from the temporary location. */ memcpy(key, actx->key, actx->key_len); - if (!actx->ecb) { + if (!rctx->ecb) { /* Copy the CBC IV just past the key. */ memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); /* CBC needs the INIT set. 
*/ @@ -300,7 +306,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * submit the buffer. */ if (actx->fill == out_off || sg_is_last(src)) { - ret = mxs_dcp_run_aes(actx, init); + ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; @@ -391,13 +397,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb) struct dcp *sdcp = global_sdcp; struct crypto_async_request *arq = &req->base; struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); + struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; if (unlikely(actx->key_len != AES_KEYSIZE_128)) return mxs_dcp_block_fallback(req, enc); - actx->enc = enc; - actx->ecb = ecb; + rctx->enc = enc; + rctx->ecb = ecb; actx->chan = DCP_CHAN_CRYPTO; mutex_lock(&sdcp->mutex[actx->chan]); @@ -484,7 +491,7 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm) return PTR_ERR(blk); actx->fallback = blk; - tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx); + tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx); return 0; } -- cgit v1.2.3-18-g5258 From 0a63b09dd6e321d4488347d6283685d08ee4bb7f Mon Sep 17 00:00:00 2001 From: Nitesh Lal Date: Sun, 9 Feb 2014 09:59:13 +0800 Subject: crypto: caam - Fix job ring discovery in controller driver The SEC Controller driver creates platform devices for its child job ring nodes. Currently the driver uses the for_each_compatible_node() routine, which traverses the whole device tree, to create the job rings for the platform device. The patch changes this to search for the job ring compatible property only in the child nodes, i.e., the job rings are created according to the number of children associated with the crypto node. Signed-off-by: Nitesh Lal Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 63fb1af2c43..30f434fd5de 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -443,13 +443,10 @@ static int caam_probe(struct platform_device *pdev) * for all, then go probe each one. */ rspec = 0; - for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") - rspec++; - if (!rspec) { - /* for backward compatible with device trees */ - for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) rspec++; - } ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, GFP_KERNEL); @@ -460,18 +457,9 @@ static int caam_probe(struct platform_device *pdev) ring = 0; ctrlpriv->total_jobrs = 0; - for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { - ctrlpriv->jrpdev[ring] = - of_platform_device_create(np, NULL, dev); - if (!ctrlpriv->jrpdev[ring]) { - pr_warn("JR%d Platform device creation error\n", ring); - continue; - } - ctrlpriv->total_jobrs++; - ring++; - } - if (!ring) { - for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { ctrlpriv->jrpdev[ring] = of_platform_device_create(np, NULL, dev); if (!ctrlpriv->jrpdev[ring]) { @@ -482,7 +470,6 @@ static int caam_probe(struct platform_device *pdev) ctrlpriv->total_jobrs++; ring++; } - } /* Check to see if QI present. 
If so, enable */ ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) & -- cgit v1.2.3-18-g5258 From 80e84c16e72a0eac30085322b4664b7b6b0dde75 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 9 Feb 2014 09:59:14 +0800 Subject: crypto: ccp - Fix ccp_run_passthru_cmd dma variable assignments There are some suspicious-looking lines of code in the new ccp driver, including one that assigns a variable to itself, and another that overwrites a previous assignment. This may have been a cut-and-paste error in which 'src' was not changed to 'dst'. I have no hardware to test this, so this is untested. Signed-off-by: Dave Jones Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-ops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 71ed3ade7e1..c266a7b154b 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1666,8 +1666,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); - op.src.u.dma.offset = dst.sg_wa.sg_used; - op.src.u.dma.length = op.src.u.dma.length; + op.dst.u.dma.offset = dst.sg_wa.sg_used; + op.dst.u.dma.length = op.src.u.dma.length; ret = ccp_perform_passthru(&op); if (ret) { -- cgit v1.2.3-18-g5258 From d81ed6534fd988a8a24fb607b459444d4b3d391a Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 24 Jan 2014 16:17:56 -0600 Subject: crypto: ccp - Allow for selective disablement of crypto API algorithms Introduce module parameters that allow for disabling of a crypto algorithm by not registering the algorithm with the crypto API. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-main.c | 37 ++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 2636f044789..b3f22b07b5b 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -24,6 +25,14 @@ MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support"); +static unsigned int aes_disable; +module_param(aes_disable, uint, 0444); +MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value"); + +static unsigned int sha_disable; +module_param(sha_disable, uint, 0444); +MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); @@ -337,21 +346,25 @@ static int ccp_register_algs(void) { int ret; - ret = ccp_register_aes_algs(&cipher_algs); - if (ret) - return ret; + if (!aes_disable) { + ret = ccp_register_aes_algs(&cipher_algs); + if (ret) + return ret; - ret = ccp_register_aes_cmac_algs(&hash_algs); - if (ret) - return ret; + ret = ccp_register_aes_cmac_algs(&hash_algs); + if (ret) + return ret; - ret = ccp_register_aes_xts_algs(&cipher_algs); - if (ret) - return ret; + ret = ccp_register_aes_xts_algs(&cipher_algs); + if (ret) + return ret; + } - ret = ccp_register_sha_algs(&hash_algs); - if (ret) - return ret; + if (!sha_disable) { + ret = ccp_register_sha_algs(&hash_algs); + if (ret) + return ret; + } return 0; } -- cgit v1.2.3-18-g5258 From c11baa02c5d6ea06362fa61da070af34b7706c83 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 24 Jan 2014 16:18:02 
-0600 Subject: crypto: ccp - Move HMAC calculation down to ccp ops file Move the support to perform an HMAC calculation into the CCP operations file. This eliminates the need to perform a synchronous SHA operation used to calculate the HMAC. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-sha.c | 130 +++++++----------------------------- drivers/crypto/ccp/ccp-crypto.h | 8 +-- drivers/crypto/ccp/ccp-ops.c | 104 ++++++++++++++++++++++++++++- include/linux/ccp.h | 7 ++ 4 files changed, 139 insertions(+), 110 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 3867290b353..873f2342524 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -24,75 +24,10 @@ #include "ccp-crypto.h" -struct ccp_sha_result { - struct completion completion; - int err; -}; - -static void ccp_sync_hash_complete(struct crypto_async_request *req, int err) -{ - struct ccp_sha_result *result = req->data; - - if (err == -EINPROGRESS) - return; - - result->err = err; - complete(&result->completion); -} - -static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf, - struct scatterlist *sg, unsigned int len) -{ - struct ccp_sha_result result; - struct ahash_request *req; - int ret; - - init_completion(&result.completion); - - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) - return -ENOMEM; - - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - ccp_sync_hash_complete, &result); - ahash_request_set_crypt(req, sg, buf, len); - - ret = crypto_ahash_digest(req); - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { - ret = wait_for_completion_interruptible(&result.completion); - if (!ret) - ret = result.err; - } - - ahash_request_free(req); - - return ret; -} - -static int ccp_sha_finish_hmac(struct crypto_async_request *async_req) -{ - struct ahash_request *req = ahash_request_cast(async_req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); - struct scatterlist sg[2]; - unsigned int block_size = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - unsigned int digest_size = crypto_ahash_digestsize(tfm); - - sg_init_table(sg, ARRAY_SIZE(sg)); - sg_set_buf(&sg[0], ctx->u.sha.opad, block_size); - sg_set_buf(&sg[1], rctx->ctx, digest_size); - - return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg, - block_size + digest_size); -} - static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); @@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) if (req->result) memcpy(req->result, rctx->ctx, digest_size); - /* If we're doing an HMAC, we need to perform that on the final op */ - if (rctx->final && ctx->u.sha.key_len) - ret = ccp_sha_finish_hmac(async_req); - e_free: sg_free_table(&rctx->data_sg); @@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); struct scatterlist *sg; unsigned int block_size = @@ -196,6 +128,11 @@ static int 
ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); rctx->cmd.u.sha.src = sg; rctx->cmd.u.sha.src_len = rctx->hash_cnt; + rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? + &ctx->u.sha.opad_sg : NULL; + rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? + ctx->u.sha.opad_count : 0; + rctx->cmd.u.sha.first = rctx->first; rctx->cmd.u.sha.final = rctx->final; rctx->cmd.u.sha.msg_bits = rctx->msg_bits; @@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req) memset(rctx, 0, sizeof(*rctx)); - memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx)); rctx->type = alg->type; rctx->first = 1; @@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - struct scatterlist sg; - unsigned int block_size = - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - unsigned int digest_size = crypto_ahash_digestsize(tfm); + struct crypto_shash *shash = ctx->u.sha.hmac_tfm; + struct { + struct shash_desc sdesc; + char ctx[crypto_shash_descsize(shash)]; + } desc; + unsigned int block_size = crypto_shash_blocksize(shash); + unsigned int digest_size = crypto_shash_digestsize(shash); int i, ret; /* Set to zero until complete */ @@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, if (key_len > block_size) { /* Must hash the input key */ - sg_init_one(&sg, key, key_len); - ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len); + desc.sdesc.tfm = shash; + desc.sdesc.flags = crypto_ahash_get_flags(tfm) & + CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_digest(&desc.sdesc, key, key_len, + ctx->u.sha.key); if (ret) { crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; @@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; } + sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); + ctx->u.sha.opad_count = block_size; + ctx->u.sha.key_len = key_len; return 0; @@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); - struct crypto_ahash *hmac_tfm; + struct crypto_shash *hmac_tfm; - hmac_tfm = crypto_alloc_ahash(alg->child_alg, - CRYPTO_ALG_TYPE_AHASH, 0); + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); if (IS_ERR(hmac_tfm)) { pr_warn("could not load driver %s need for HMAC support\n", alg->child_alg); @@ -339,35 +284,14 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->u.sha.hmac_tfm) - crypto_free_ahash(ctx->u.sha.hmac_tfm); + crypto_free_shash(ctx->u.sha.hmac_tfm); ccp_sha_cra_exit(tfm); } -static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), - cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), - cpu_to_be32(SHA1_H4), 0, 0, 0, -}; - -static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), - cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), - cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), - cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), -}; - -static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { - cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), - cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), - cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), - cpu_to_be32(SHA256_H6), 
cpu_to_be32(SHA256_H7), -}; - struct ccp_sha_def { const char *name; const char *drv_name; - const __be32 *init; enum ccp_sha_type type; u32 digest_size; u32 block_size; @@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha1", .drv_name = "sha1-ccp", - .init = sha1_init, .type = CCP_SHA_TYPE_1, .digest_size = SHA1_DIGEST_SIZE, .block_size = SHA1_BLOCK_SIZE, @@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha224", .drv_name = "sha224-ccp", - .init = sha224_init, .type = CCP_SHA_TYPE_224, .digest_size = SHA224_DIGEST_SIZE, .block_size = SHA224_BLOCK_SIZE, @@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = { { .name = "sha256", .drv_name = "sha256-ccp", - .init = sha256_init, .type = CCP_SHA_TYPE_256, .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, @@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head, INIT_LIST_HEAD(&ccp_alg->entry); - ccp_alg->init = def->init; ccp_alg->type = def->type; alg = &ccp_alg->alg; diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index b222231b616..9aa4ae184f7 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx { #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE struct ccp_sha_ctx { + struct scatterlist opad_sg; + unsigned int opad_count; + unsigned int key_len; u8 key[MAX_SHA_BLOCK_SIZE]; u8 ipad[MAX_SHA_BLOCK_SIZE]; u8 opad[MAX_SHA_BLOCK_SIZE]; - struct crypto_ahash *hmac_tfm; + struct crypto_shash *hmac_tfm; }; struct ccp_sha_req_ctx { @@ -167,9 +170,6 @@ struct ccp_sha_req_ctx { unsigned int buf_count; u8 buf[MAX_SHA_BLOCK_SIZE]; - /* HMAC support field */ - struct scatterlist pad_sg; - /* CCP driver command */ struct ccp_cmd cmd; }; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c266a7b154b..9ae006d69df 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "ccp-dev.h" @@ -132,6 +133,27 @@ struct ccp_op { } u; }; +/* SHA initial context values */ +static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), + cpu_to_be32(SHA1_H4), 0, 0, 0, +}; + +static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), + cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), +}; + +static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), +}; + /* The CCP cannot perform zero-length sha operations so the caller * is required to buffer data for the final operation. 
However, a * sha operation for a message with a total length of zero is valid @@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (ret) return ret; - ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + if (sha->first) { + const __be32 *init; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + init = ccp_sha1_init; + break; + case CCP_SHA_TYPE_224: + init = ccp_sha224_init; + break; + case CCP_SHA_TYPE_256: + init = ccp_sha256_init; + break; + default: + ret = -EINVAL; + goto e_ctx; + } + memcpy(ctx.address, init, CCP_SHA_CTXSIZE); + } else + ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { @@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + if (sha->final && sha->opad) { + /* HMAC operation, recursively perform final SHA */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u64 block_size, digest_size; + u8 *hmac_buf; + + switch (sha->type) { + case CCP_SHA_TYPE_1: + block_size = SHA1_BLOCK_SIZE; + digest_size = SHA1_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_224: + block_size = SHA224_BLOCK_SIZE; + digest_size = SHA224_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_256: + block_size = SHA256_BLOCK_SIZE; + digest_size = SHA256_DIGEST_SIZE; + break; + default: + ret = -EINVAL; + goto e_data; + } + + if (sha->opad_len != block_size) { + ret = -EINVAL; + goto e_data; + } + + hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + sg_init_one(&sg, hmac_buf, block_size + digest_size); + + scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); + memcpy(hmac_buf + block_size, ctx.address, digest_size); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SHA; + hmac_cmd.u.sha.type = sha->type; + hmac_cmd.u.sha.ctx = sha->ctx; + hmac_cmd.u.sha.ctx_len = sha->ctx_len; + hmac_cmd.u.sha.src = &sg; + hmac_cmd.u.sha.src_len = block_size + digest_size; + hmac_cmd.u.sha.opad = NULL; + hmac_cmd.u.sha.opad_len = 0; + hmac_cmd.u.sha.first = 1; + hmac_cmd.u.sha.final = 1; + hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; + + ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } + e_data: ccp_free_data(&src, cmd_q); diff --git a/include/linux/ccp.h b/include/linux/ccp.h index b941ab9f762..ebcc9d14621 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -232,6 +232,9 @@ enum ccp_sha_type { * @ctx_len: length in bytes of hash value * @src: data to be used for this operation * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SHA operation * @final: indicates final SHA operation * @msg_bits: total length of the message in bits used in final SHA operation * @@ -251,6 +254,10 @@ struct ccp_sha_engine { struct scatterlist *src; u64 src_len; /* In bytes */ + struct scatterlist *opad; + u32 opad_len; /* In bytes */ + + u32 first; /* Indicates first sha cmd */ u32 final; /* Indicates final sha cmd */ u64 msg_bits; /* Message length in bits required for * final sha cmd */ -- cgit v1.2.3-18-g5258 From bc3854476f36d816d52cd8d41d1ecab2f8b6cdcf Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 24 Jan 2014 16:18:08 -0600 Subject: crypto: 
ccp - Use a single queue for proper ordering of tfm requests Move to a single queue to serialize requests within a tfm. When testing with IPsec and a large number of network connections, the per-CPU tfm queuing logic did not work properly. Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-main.c | 164 ++++++++++------------------------- 1 file changed, 48 insertions(+), 116 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index b3f22b07b5b..010fded5d46 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -38,23 +38,20 @@ MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value"); static LIST_HEAD(hash_algs); static LIST_HEAD(cipher_algs); -/* For any tfm, requests for that tfm on the same CPU must be returned - * in the order received. With multiple queues available, the CCP can - * process more than one cmd at a time. Therefore we must maintain - * a cmd list to insure the proper ordering of requests on a given tfm/cpu - * combination. +/* For any tfm, requests for that tfm must be returned on the order + * received. With multiple queues available, the CCP can process more + * than one cmd at a time. Therefore we must maintain a cmd list to insure + * the proper ordering of requests on a given tfm. */ -struct ccp_crypto_cpu_queue { +struct ccp_crypto_queue { struct list_head cmds; struct list_head *backlog; unsigned int cmd_count; }; -#define CCP_CRYPTO_MAX_QLEN 50 +#define CCP_CRYPTO_MAX_QLEN 100 -struct ccp_crypto_percpu_queue { - struct ccp_crypto_cpu_queue __percpu *cpu_queue; -}; -static struct ccp_crypto_percpu_queue req_queue; +static struct ccp_crypto_queue req_queue; +static spinlock_t req_queue_lock; struct ccp_crypto_cmd { struct list_head entry; @@ -71,8 +68,6 @@ struct ccp_crypto_cmd { /* Used for held command processing to determine state */ int ret; - - int cpu; }; struct ccp_crypto_cpu { @@ -91,25 +86,21 @@ static inline bool ccp_crypto_success(int err) return true; } -/* - * ccp_crypto_cmd_complete must be called while running on the appropriate - * cpu and the caller must have done a get_cpu to disable preemption - */ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) { - struct ccp_crypto_cpu_queue *cpu_queue; struct ccp_crypto_cmd *held = NULL, *tmp; + unsigned long flags; *backlog = NULL; - cpu_queue = this_cpu_ptr(req_queue.cpu_queue); + spin_lock_irqsave(&req_queue_lock, flags); /* Held cmds will be after the current cmd in the queue so start * searching for a cmd with a matching tfm for submission. */ tmp = crypto_cmd; - list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) { + list_for_each_entry_continue(tmp, &req_queue.cmds, entry) { if (crypto_cmd->tfm != tmp->tfm) continue; held = tmp; @@ -120,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( * Because cmds can be executed from any point in the cmd list * special precautions have to be taken when handling the backlog. 
*/ - if (cpu_queue->backlog != &cpu_queue->cmds) { + if (req_queue.backlog != &req_queue.cmds) { /* Skip over this cmd if it is the next backlog cmd */ - if (cpu_queue->backlog == &crypto_cmd->entry) - cpu_queue->backlog = crypto_cmd->entry.next; + if (req_queue.backlog == &crypto_cmd->entry) + req_queue.backlog = crypto_cmd->entry.next; - *backlog = container_of(cpu_queue->backlog, + *backlog = container_of(req_queue.backlog, struct ccp_crypto_cmd, entry); - cpu_queue->backlog = cpu_queue->backlog->next; + req_queue.backlog = req_queue.backlog->next; /* Skip over this cmd if it is now the next backlog cmd */ - if (cpu_queue->backlog == &crypto_cmd->entry) - cpu_queue->backlog = crypto_cmd->entry.next; + if (req_queue.backlog == &crypto_cmd->entry) + req_queue.backlog = crypto_cmd->entry.next; } /* Remove the cmd entry from the list of cmds */ - cpu_queue->cmd_count--; + req_queue.cmd_count--; list_del(&crypto_cmd->entry); + spin_unlock_irqrestore(&req_queue_lock, flags); + return held; } -static void ccp_crypto_complete_on_cpu(struct work_struct *work) +static void ccp_crypto_complete(void *data, int err) { - struct ccp_crypto_cpu *cpu_work = - container_of(work, struct ccp_crypto_cpu, work); - struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd; + struct ccp_crypto_cmd *crypto_cmd = data; struct ccp_crypto_cmd *held, *next, *backlog; struct crypto_async_request *req = crypto_cmd->req; struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); - int cpu, ret; - - cpu = get_cpu(); + int ret; - if (cpu_work->err == -EINPROGRESS) { + if (err == -EINPROGRESS) { /* Only propogate the -EINPROGRESS if necessary */ if (crypto_cmd->ret == -EBUSY) { crypto_cmd->ret = -EINPROGRESS; req->complete(req, -EINPROGRESS); } - goto e_cpu; + return; } /* Operation has completed - update the queue before invoking @@ -178,7 +167,7 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work) req->complete(req, -EINPROGRESS); /* Completion callbacks */ - ret = cpu_work->err; + ret = err; if (ctx->complete) ret = ctx->complete(req, ret); req->complete(req, ret); @@ -203,52 +192,28 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work) } kfree(crypto_cmd); - -e_cpu: - put_cpu(); - - complete(&cpu_work->completion); -} - -static void ccp_crypto_complete(void *data, int err) -{ - struct ccp_crypto_cmd *crypto_cmd = data; - struct ccp_crypto_cpu cpu_work; - - INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu); - init_completion(&cpu_work.completion); - cpu_work.crypto_cmd = crypto_cmd; - cpu_work.err = err; - - schedule_work_on(crypto_cmd->cpu, &cpu_work.work); - - /* Keep the completion call synchronous */ - wait_for_completion(&cpu_work.completion); } static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) { - struct ccp_crypto_cpu_queue *cpu_queue; struct ccp_crypto_cmd *active = NULL, *tmp; - int cpu, ret; - - cpu = get_cpu(); - crypto_cmd->cpu = cpu; + unsigned long flags; + int ret; - cpu_queue = this_cpu_ptr(req_queue.cpu_queue); + spin_lock_irqsave(&req_queue_lock, flags); /* Check if the cmd can/should be queued */ - if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) { + if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { ret = -EBUSY; if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) - goto e_cpu; + goto e_lock; } /* Look for an entry with the same tfm. If there is a cmd - * with the same tfm in the list for this cpu then the current - * cmd cannot be submitted to the CCP yet. + * with the same tfm in the list then the current cmd cannot + * be submitted to the CCP yet. 
*/ - list_for_each_entry(tmp, &cpu_queue->cmds, entry) { + list_for_each_entry(tmp, &req_queue.cmds, entry) { if (crypto_cmd->tfm != tmp->tfm) continue; active = tmp; @@ -259,21 +224,21 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) if (!active) { ret = ccp_enqueue_cmd(crypto_cmd->cmd); if (!ccp_crypto_success(ret)) - goto e_cpu; + goto e_lock; } - if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) { + if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { ret = -EBUSY; - if (cpu_queue->backlog == &cpu_queue->cmds) - cpu_queue->backlog = &crypto_cmd->entry; + if (req_queue.backlog == &req_queue.cmds) + req_queue.backlog = &crypto_cmd->entry; } crypto_cmd->ret = ret; - cpu_queue->cmd_count++; - list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds); + req_queue.cmd_count++; + list_add_tail(&crypto_cmd->entry, &req_queue.cmds); -e_cpu: - put_cpu(); +e_lock: + spin_unlock_irqrestore(&req_queue_lock, flags); return ret; } @@ -387,50 +352,18 @@ static void ccp_unregister_algs(void) } } -static int ccp_init_queues(void) -{ - struct ccp_crypto_cpu_queue *cpu_queue; - int cpu; - - req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue); - if (!req_queue.cpu_queue) - return -ENOMEM; - - for_each_possible_cpu(cpu) { - cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu); - INIT_LIST_HEAD(&cpu_queue->cmds); - cpu_queue->backlog = &cpu_queue->cmds; - cpu_queue->cmd_count = 0; - } - - return 0; -} - -static void ccp_fini_queue(void) -{ - struct ccp_crypto_cpu_queue *cpu_queue; - int cpu; - - for_each_possible_cpu(cpu) { - cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu); - BUG_ON(!list_empty(&cpu_queue->cmds)); - } - free_percpu(req_queue.cpu_queue); -} - static int ccp_crypto_init(void) { int ret; - ret = ccp_init_queues(); - if (ret) - return ret; + spin_lock_init(&req_queue_lock); + INIT_LIST_HEAD(&req_queue.cmds); + req_queue.backlog = &req_queue.cmds; + req_queue.cmd_count = 0; ret = ccp_register_algs(); - if (ret) { + if (ret) ccp_unregister_algs(); - ccp_fini_queue(); - } return ret; } @@ -438,7 +371,6 @@ static int ccp_crypto_init(void) static void ccp_crypto_exit(void) { ccp_unregister_algs(); - ccp_fini_queue(); } module_init(ccp_crypto_init); -- cgit v1.2.3-18-g5258 From 530abd89387b5213000b214be64fadd8ab3176a7 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Fri, 24 Jan 2014 16:18:14 -0600 Subject: crypto: ccp - Perform completion callbacks using a tasklet Change from scheduling work to scheduling a tasklet to perform the callback operations. 
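As an illustration of the completion pattern this patch adopts, here is a minimal, self-contained sketch (hypothetical demo_* names, not the driver's actual code): the submitting thread initializes a tasklet bound to a small data block, schedules it, and waits on a completion that the tasklet signals after invoking the callback.

	#include <linux/interrupt.h>
	#include <linux/completion.h>

	struct demo_tasklet_data {
		struct completion completion;
		int status;
		void (*callback)(int status);
	};

	/* Runs in softirq context once scheduled. */
	static void demo_tasklet_fn(unsigned long data)
	{
		struct demo_tasklet_data *td = (struct demo_tasklet_data *)data;

		td->callback(td->status);
		complete(&td->completion);
	}

	static void demo_complete_via_tasklet(void (*cb)(int), int status)
	{
		struct demo_tasklet_data td;
		struct tasklet_struct tasklet;

		tasklet_init(&tasklet, demo_tasklet_fn, (unsigned long)&td);

		td.callback = cb;
		td.status = status;
		init_completion(&td.completion);
		tasklet_schedule(&tasklet);

		/* Keep the completion call synchronous, as the patch does. */
		wait_for_completion(&td.completion);
		tasklet_kill(&tasklet);
	}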
Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index c3bc2126460..2c7816149b0 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -30,6 +30,11 @@ MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); +struct ccp_tasklet_data { + struct completion completion; + struct ccp_cmd *cmd; +}; + static struct ccp_device *ccp_dev; static inline struct ccp_device *ccp_get_device(void) @@ -192,17 +197,23 @@ static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) return cmd; } -static void ccp_do_cmd_complete(struct work_struct *work) +static void ccp_do_cmd_complete(unsigned long data) { - struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); + struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; + struct ccp_cmd *cmd = tdata->cmd; cmd->callback(cmd->data, cmd->ret); + complete(&tdata->completion); } static int ccp_cmd_queue_thread(void *data) { struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; struct ccp_cmd *cmd; + struct ccp_tasklet_data tdata; + struct tasklet_struct tasklet; + + tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { @@ -220,8 +231,10 @@ static int ccp_cmd_queue_thread(void *data) cmd->ret = ccp_run_cmd(cmd_q, cmd); /* Schedule the completion callback */ - INIT_WORK(&cmd->work, ccp_do_cmd_complete); - schedule_work(&cmd->work); + tdata.cmd = cmd; + init_completion(&tdata.completion); + tasklet_schedule(&tasklet); + wait_for_completion(&tdata.completion); } __set_current_state(TASK_RUNNING); -- cgit v1.2.3-18-g5258 From f3de9cb1ca6ce347393c4789b3a6142f96827f18 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 28 Jan 2014 20:17:23 +0800 Subject: crypto: talitos: init the priv->alg_list earlier in talitos_probe() In talitos_probe(), the code jumps to err_out when talitos_probe_irq() returns an error. Then the uninitialized list head priv->alg_list will be used in talitos_remove(). In this case we would get a call trace like the following. So move up the initialization of priv->alg_list. 
Unable to handle kernel paging request for data at address 0x00000000 Faulting instruction address: 0xc0459ff4 Oops: Kernel access of bad area, sig: 11 [#1] SMP NR_CPUS=8 P1020 RDB Modules linked in: CPU: 1 PID: 1 Comm: swapper/0 Tainted: G W 3.13.0-08789-g54c0a4b46150 #33 task: cf050000 ti: cf04c000 task.ti: cf04c000 NIP: c0459ff4 LR: c0459fd4 CTR: c02f2438 REGS: cf04dcb0 TRAP: 0300 Tainted: G W (3.13.0-08789-g54c0a4b46150) MSR: 00029000 CR: 82000028 XER: 20000000 DEAR: 00000000 ESR: 00000000 GPR00: c045ac28 cf04dd60 cf050000 cf2579c0 00021000 00000000 c02f35b0 0000014e GPR08: c07e702c cf104300 c07e702c 0000014e 22000024 00000000 c0002a3c 00000000 GPR16: 00000000 00000000 00000000 00000000 00000000 00000000 c082e4e0 000000df GPR24: 00000000 00100100 00200200 cf257a2c cf0efe10 cf2579c0 cf0efe10 00000000 NIP [c0459ff4] talitos_remove+0x3c/0x1c8 LR [c0459fd4] talitos_remove+0x1c/0x1c8 Call Trace: [cf04dd60] [c07485d8] __func__.13331+0x1241c8/0x1391c0 (unreliable) [cf04dd90] [c045ac28] talitos_probe+0x244/0x998 [cf04dde0] [c0306a74] platform_drv_probe+0x28/0x68 [cf04ddf0] [c0304d38] really_probe+0x78/0x250 [cf04de10] [c030505c] __driver_attach+0xc8/0xcc [cf04de30] [c0302e98] bus_for_each_dev+0x6c/0xb8 [cf04de60] [c03043cc] bus_add_driver+0x168/0x220 [cf04de80] [c0305798] driver_register+0x88/0x130 [cf04de90] [c0002458] do_one_initcall+0x14c/0x198 [cf04df00] [c079f904] kernel_init_freeable+0x138/0x1d4 [cf04df30] [c0002a50] kernel_init+0x14/0x124 [cf04df40] [c000ec40] ret_from_kernel_thread+0x5c/0x64 Signed-off-by: Kevin Hao Signed-off-by: Herbert Xu --- drivers/crypto/talitos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5967667e1a8..624b8be0c36 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2637,6 +2637,8 @@ static int talitos_probe(struct platform_device *ofdev) if (!priv) return -ENOMEM; + INIT_LIST_HEAD(&priv->alg_list); + dev_set_drvdata(dev, priv); priv->ofdev = ofdev; @@ -2657,8 +2659,6 @@ static int talitos_probe(struct platform_device *ofdev) (unsigned long)dev); } - INIT_LIST_HEAD(&priv->alg_list); - priv->reg = of_iomap(np, 0); if (!priv->reg) { dev_err(dev, "failed to of_iomap\n"); -- cgit v1.2.3-18-g5258 From e921f0307531b27dbe34c17e8a5be5a88010d179 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 28 Jan 2014 22:36:11 -0200 Subject: crypto: mxs-dcp: Use devm_kzalloc() Using devm_kzalloc() can make the code cleaner. While at it, remove the devm_kzalloc error message as there is a standard OOM message emitted by the core. Signed-off-by: Fabio Estevam Acked-by: Marek Vasut Signed-off-by: Herbert Xu --- drivers/crypto/mxs-dcp.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 56bde65ddad..30941d0c580 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -942,9 +942,8 @@ static int mxs_dcp_probe(struct platform_device *pdev) } /* Allocate coherent helper block. 
*/ - sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL); + sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh), GFP_KERNEL); if (!sdcp->coh) { - dev_err(dev, "Error allocating coherent block\n"); ret = -ENOMEM; goto err_mutex; } @@ -989,7 +988,7 @@ static int mxs_dcp_probe(struct platform_device *pdev) if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { dev_err(dev, "Error starting SHA thread!\n"); ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); - goto err_free_coherent; + goto err_mutex; } sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, @@ -1047,8 +1046,6 @@ err_destroy_aes_thread: err_destroy_sha_thread: kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); -err_free_coherent: - kfree(sdcp->coh); err_mutex: mutex_unlock(&global_mutex); return ret; @@ -1058,8 +1055,6 @@ static int mxs_dcp_remove(struct platform_device *pdev) { struct dcp *sdcp = platform_get_drvdata(pdev); - kfree(sdcp->coh); - if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) crypto_unregister_ahash(&dcp_sha256_alg); -- cgit v1.2.3-18-g5258 From fecfd7f7e91fc1e82d44b0e64a6bda8133f2037b Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 28 Jan 2014 22:36:12 -0200 Subject: crypto: mxs-dcp: Check the return value of stmp_reset_block() stmp_reset_block() may fail, so check its return value and propagate it in the case of error. Signed-off-by: Fabio Estevam Acked-by: Marek Vasut Signed-off-by: Herbert Xu --- drivers/crypto/mxs-dcp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 30941d0c580..37e07067070 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -949,7 +949,9 @@ static int mxs_dcp_probe(struct platform_device *pdev) } /* Restart the DCP block. */ - stmp_reset_block(sdcp->base); + ret = stmp_reset_block(sdcp->base); + if (ret) + goto err_mutex; /* Initialize control register. */ writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | -- cgit v1.2.3-18-g5258 From d167b6e1fb8ad386b17485ca88804d14f1695805 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 30 Jan 2014 14:49:43 +0300 Subject: hwrng: cleanup in hwrng_register() My static checker complains that: drivers/char/hw_random/core.c:341 hwrng_register() warn: we tested 'old_rng' before and it was 'false' The problem is that sometimes we test "if (!old_rng)" and sometimes we test "if (must_register_misc)". The static checker knows they are equivalent but a human being reading the code could easily be confused. I have simplified the code by removing the "must_register_misc" variable and I have removed the redundant check on "if (!old_rng)". 
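To see why the two conditions collapse, consider this minimal sketch of the simplified registration flow (hypothetical demo_* names and stub helpers, not the kernel's hwrng code): the "first provider" case is tested once via the saved old pointer, so the cleanup on miscdevice failure no longer needs its own nested check.

	struct demo_rng { const char *name; };

	static struct demo_rng *current_rng;

	/* Stubs standing in for the real init/cleanup/miscdev helpers. */
	static int demo_init(struct demo_rng *rng) { (void)rng; return 0; }
	static void demo_cleanup(struct demo_rng *rng) { (void)rng; }
	static int demo_register_miscdev(void) { return 0; }

	static int demo_register(struct demo_rng *rng)
	{
		struct demo_rng *old_rng = current_rng;
		int err;

		if (!old_rng) {
			/* First provider: make it current... */
			err = demo_init(rng);
			if (err)
				return err;
			current_rng = rng;

			/* ...and expose the device node exactly once. */
			err = demo_register_miscdev();
			if (err) {
				demo_cleanup(rng);
				current_rng = NULL;
				return err;
			}
		}
		return 0;
	}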
Signed-off-by: Dan Carpenter Reviewed-by: Rusty Russell Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a0f7724852e..cf49f1c88f0 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -302,7 +302,6 @@ err_misc_dereg: int hwrng_register(struct hwrng *rng) { - int must_register_misc; int err = -EINVAL; struct hwrng *old_rng, *tmp; @@ -327,7 +326,6 @@ int hwrng_register(struct hwrng *rng) goto out_unlock; } - must_register_misc = (current_rng == NULL); old_rng = current_rng; if (!old_rng) { err = hwrng_init(rng); @@ -336,13 +334,11 @@ int hwrng_register(struct hwrng *rng) current_rng = rng; } err = 0; - if (must_register_misc) { + if (!old_rng) { err = register_miscdev(); if (err) { - if (!old_rng) { - hwrng_cleanup(rng); - current_rng = NULL; - } + hwrng_cleanup(rng); + current_rng = NULL; goto out_unlock; } } -- cgit v1.2.3-18-g5258 From 883619a931e9f54fca7495321b339669f11cc727 Mon Sep 17 00:00:00 2001 From: Alex Porosanu Date: Thu, 6 Feb 2014 10:27:19 +0200 Subject: crypto: caam - fix ERA retrieval function SEC ERA has to be retrieved by reading the "fsl,sec-era" property from the device tree. This property is updated/filled in by u-boot. Signed-off-by: Alex Porosanu Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 36 ++++++++++-------------------------- drivers/crypto/caam/ctrl.h | 2 +- 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 30f434fd5de..1c38f86bf63 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -14,7 +14,6 @@ #include "jr.h" #include "desc_constr.h" #include "error.h" -#include "ctrl.h" /* * Descriptor to instantiate RNG State Handle 0 in normal mode and @@ -352,32 +351,17 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) /** * caam_get_era() - Return the ERA of the SEC on SoC, based - * on the SEC_VID register. - * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown. - * @caam_id - the value of the SEC_VID register + * on "sec-era" propery in the DTS. This property is updated by u-boot. **/ -int caam_get_era(u64 caam_id) +int caam_get_era(void) { - struct sec_vid *sec_vid = (struct sec_vid *)&caam_id; - static const struct { - u16 ip_id; - u8 maj_rev; - u8 era; - } caam_eras[] = { - {0x0A10, 1, 1}, - {0x0A10, 2, 2}, - {0x0A12, 1, 3}, - {0x0A14, 1, 3}, - {0x0A14, 2, 4}, - {0x0A16, 1, 4}, - {0x0A11, 1, 4} - }; - int i; - - for (i = 0; i < ARRAY_SIZE(caam_eras); i++) - if (caam_eras[i].ip_id == sec_vid->ip_id && - caam_eras[i].maj_rev == sec_vid->maj_rev) - return caam_eras[i].era; + struct device_node *caam_node; + for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { + const uint32_t *prop = (uint32_t *)of_get_property(caam_node, + "fsl,sec-era", + NULL); + return prop ? 
*prop : -ENOTSUPP; } return -ENOTSUPP; } @@ -551,7 +535,7 @@ static int caam_probe(struct platform_device *pdev) /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, - caam_get_era(caam_id)); + caam_get_era()); dev_info(dev, "job rings = %d, qi = %d\n", ctrlpriv->total_jobrs, ctrlpriv->qi_present); diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h index 980d44eaaf4..cac5402a46e 100644 --- a/drivers/crypto/caam/ctrl.h +++ b/drivers/crypto/caam/ctrl.h @@ -8,6 +8,6 @@ #define CTRL_H /* Prototypes for backend-level services exposed to APIs */ -int caam_get_era(u64 caam_id); +int caam_get_era(void); #endif /* CTRL_H */ -- cgit v1.2.3-18-g5258 From 06e5a1f29819759392239669beb2cad27059c8ec Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Thu, 23 Jan 2014 03:25:47 -0800 Subject: CRC32C: Add soft module dependency to load other accelerated crc32c modules We added a soft module dependency on the crc32c module alias to the generic crc32c module so that other, hardware-accelerated crc32c modules can be loaded and used before the generic version. We also renamed crypto/crc32c.c, which contains the generic crc32c crypto computation, to crypto/crc32c_generic.c, according to convention. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Makefile | 2 +- crypto/crc32c.c | 172 ----------------------------------------- crypto/crc32c_generic.c | 174 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 173 deletions(-) delete mode 100644 crypto/crc32c.c create mode 100644 crypto/crc32c_generic.c diff --git a/crypto/Makefile b/crypto/Makefile index b29402a7b9b..38e64231dcd 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,7 +81,7 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o -obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o +obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32.o obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o diff --git a/crypto/crc32c.c b/crypto/crc32c.c deleted file mode 100644 index 06f7018c9d9..00000000000 --- a/crypto/crc32c.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Cryptographic API. - * - * CRC32C chksum - * - *@Article{castagnoli-crc, - * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, - * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 - * and 32 Parity Bits}}, - * journal = IEEE Transactions on Communication, - * year = {1993}, - * volume = {41}, - * number = {6}, - * pages = {}, - * month = {June}, - *} - * Used by the iSCSI driver, possibly others, and derived from the - * the iscsi-crc.c module of the linux-iscsi driver at - * http://linux-iscsi.sourceforge.net. - * - * Following the example of lib/crc32, this function is intended to be - * flexible and useful for all users. Modules that currently have their - * own crc32c, but hopefully may be able to use this one are: - * net/sctp (please add all your doco to here if you change to - * use this one!) - * - * - * Copyright (c) 2004 Cisco Systems, Inc. - * Copyright (c) 2008 Herbert Xu - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - */ - -#include -#include -#include -#include -#include -#include - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -struct chksum_ctx { - u32 key; -}; - -struct chksum_desc_ctx { - u32 crc; -}; - -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. - */ - -static int chksum_init(struct shash_desc *desc) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = mctx->key; - - return 0; -} - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. - */ -static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(tfm); - - if (keylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - mctx->key = le32_to_cpu(*(__le32 *)key); - return 0; -} - -static int chksum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = __crc32c_le(ctx->crc, data, length); - return 0; -} - -static int chksum_final(struct shash_desc *desc, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - *(__le32 *)out = ~cpu_to_le32p(&ctx->crc); - return 0; -} - -static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) -{ - *(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len)); - return 0; -} - -static int chksum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, len, out); -} - -static int chksum_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); - - return __chksum_finup(&mctx->key, data, length, out); -} - -static int crc32c_cra_init(struct crypto_tfm *tfm) -{ - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - - mctx->key = ~0; - return 0; -} - -static struct shash_alg alg = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = chksum_setkey, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-generic", - .cra_priority = 100, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 3, - .cra_ctxsize = sizeof(struct chksum_ctx), - .cra_module = THIS_MODULE, - .cra_init = crc32c_cra_init, - } -}; - -static int __init crc32c_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crc32c_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_init(crc32c_mod_init); -module_exit(crc32c_mod_fini); - -MODULE_AUTHOR("Clay Haapala "); -MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); -MODULE_LICENSE("GPL"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c new file mode 100644 index 00000000000..d9c7beba8e5 --- /dev/null +++ b/crypto/crc32c_generic.c @@ -0,0 +1,174 @@ +/* + * Cryptographic API. 
+ * + * CRC32C chksum + * + *@Article{castagnoli-crc, + * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, + * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 + * and 32 Parity Bits}}, + * journal = IEEE Transactions on Communication, + * year = {1993}, + * volume = {41}, + * number = {6}, + * pages = {}, + * month = {June}, + *} + * Used by the iSCSI driver, possibly others, and derived from the + * the iscsi-crc.c module of the linux-iscsi driver at + * http://linux-iscsi.sourceforge.net. + * + * Following the example of lib/crc32, this function is intended to be + * flexible and useful for all users. Modules that currently have their + * own crc32c, but hopefully may be able to use this one are: + * net/sctp (please add all your doco to here if you change to + * use this one!) + * + * + * Copyright (c) 2004 Cisco Systems, Inc. + * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. + */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = mctx->key; + + return 0; +} + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. 
+ */ +static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(tfm); + + if (keylen != sizeof(mctx->key)) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + mctx->key = le32_to_cpu(*(__le32 *)key); + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = __crc32c_le(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__le32 *)out = ~cpu_to_le32p(&ctx->crc); + return 0; +} + +static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) +{ + *(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len)); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + + return __chksum_finup(&mctx->key, data, length, out); +} + +static int crc32c_cra_init(struct crypto_tfm *tfm) +{ + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->key = ~0; + return 0; +} + +static struct shash_alg alg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-generic", + .cra_priority = 100, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 3, + .cra_ctxsize = sizeof(struct chksum_ctx), + .cra_module = THIS_MODULE, + .cra_init = crc32c_cra_init, + } +}; + +static int __init crc32c_mod_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crc32c_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc32c_mod_init); +module_exit(crc32c_mod_fini); + +MODULE_AUTHOR("Clay Haapala "); +MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("crc32c"); +MODULE_SOFTDEP("pre: crc32c"); -- cgit v1.2.3-18-g5258 From 32af1e180f53fd6c60355b24a5fa9bc45dfa6fcc Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 12 Feb 2014 13:28:59 +0900 Subject: crypto: picoxcell - Use devm_ioremap_resource() Use devm_ioremap_resource() in order to make the code simpler, and remove redundant return value check of platform_get_resource() because the value is checked by devm_ioremap_resource(). 
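The conversion pattern that this and the following devm patches share, shown as a minimal sketch (hypothetical "demo" driver, not the picoxcell code itself): devm_ioremap_resource() validates the resource (including a NULL resource returned by platform_get_resource()), requests the memory region, and maps it, so the separate NULL check, devm_request_mem_region(), and devm_ioremap() calls all collapse into a single IS_ERR() test.

	#include <linux/platform_device.h>
	#include <linux/err.h>
	#include <linux/io.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* devm_ioremap_resource() reports and rejects res == NULL. */
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... program the device through base ... */
		return 0;
	}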
Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/crypto/picoxcell_crypto.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index a6175ba6d23..5da5b98b8f2 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1720,22 +1720,16 @@ static int spacc_probe(struct platform_device *pdev) engine->name = dev_name(&pdev->dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + engine->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(engine->regs)) + return PTR_ERR(engine->regs); + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!mem || !irq) { + if (!irq) { dev_err(&pdev->dev, "no memory/irq resource for engine\n"); return -ENXIO; } - if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), - engine->name)) - return -ENOMEM; - - engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); - if (!engine->regs) { - dev_err(&pdev->dev, "memory map failed\n"); - return -ENOMEM; - } - if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, engine->name, engine)) { dev_err(engine->dev, "failed to request IRQ\n"); -- cgit v1.2.3-18-g5258 From 9e95275cf351ebbb02316addfa2d8d87173a4cd7 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 12 Feb 2014 13:23:37 +0900 Subject: crypto: sahara - Use devm_ioremap_resource() Use devm_ioremap_resource() in order to make the code simpler, and remove redundant return value check of platform_get_resource() because the value is checked by devm_ioremap_resource(). Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 785a9ded7bd..894468fdb02 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -885,22 +885,9 @@ static int sahara_probe(struct platform_device *pdev) /* Get the base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get memory region resource\n"); - return -ENODEV; - } - - if (devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), SAHARA_NAME) == NULL) { - dev_err(&pdev->dev, "failed to request memory region\n"); - return -ENOENT; - } - dev->regs_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!dev->regs_base) { - dev_err(&pdev->dev, "failed to ioremap address region\n"); - return -ENOENT; - } + dev->regs_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->regs_base)) + return PTR_ERR(dev->regs_base); /* Get the IRQ */ irq = platform_get_irq(pdev, 0); -- cgit v1.2.3-18-g5258 From 0fdefe2c907610f3efb3163c8b72127007e282aa Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 12 Feb 2014 13:24:57 +0900 Subject: crypto: s5p-sss - Use devm_ioremap_resource() Use devm_ioremap_resource() in order to make the code simpler, and remove redundant return value check of platform_get_resource() because the value is checked by devm_ioremap_resource(). 
Signed-off-by: Jingoo Han Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index cf149b19ff4..be45762f390 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -568,17 +568,14 @@ static int s5p_aes_probe(struct platform_device *pdev) if (s5p_dev) return -EEXIST; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - if (!devm_request_mem_region(dev, res->start, - resource_size(res), pdev->name)) - return -EBUSY; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->ioaddr)) + return PTR_ERR(pdata->ioaddr); pdata->clk = devm_clk_get(dev, "secss"); if (IS_ERR(pdata->clk)) { @@ -589,8 +586,6 @@ static int s5p_aes_probe(struct platform_device *pdev) clk_enable(pdata->clk); spin_lock_init(&pdata->lock); - pdata->ioaddr = devm_ioremap(dev, res->start, - resource_size(res)); pdata->irq_hash = platform_get_irq_byname(pdev, "hash"); if (pdata->irq_hash < 0) { -- cgit v1.2.3-18-g5258 From bfaff75b09c42d8fc57b020034e836fe9048f8b1 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 12 Feb 2014 14:17:08 +0900 Subject: hwrng: atmel - Use devm_ioremap_resource() Use devm_ioremap_resource() in order to make the code simpler, and remove redundant return value check of platform_get_resource() because the value is checked by devm_ioremap_resource(). Signed-off-by: Jingoo Han Acked-by: Peter Korsgaard Acked-by: Nicolas Ferre Signed-off-by: Herbert Xu --- drivers/char/hw_random/atmel-rng.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index bf9fc6b7932..dfeddf2c00b 100644 --- a/dri