Diffstat (limited to 'drivers/crypto/mv_cesa.c')
-rw-r--r--  drivers/crypto/mv_cesa.c  125
1 file changed, 94 insertions, 31 deletions
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 0d40cf66b3c..29d0ee50490 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,13 +16,18 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include "mv_cesa.h"
#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE 0xFFFF
+#define MV_CESA_EXPIRE 500 /* msec */
/*
* STM:
@@ -79,12 +84,14 @@ struct crypto_priv {
void __iomem *reg;
void __iomem *sram;
int irq;
+ struct clk *clk;
struct task_struct *queue_th;
/* the lock protects queue and eng_st */
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
+ struct timer_list completion_timer;
struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
@@ -136,6 +143,29 @@ struct mv_req_hash_ctx {
int count_add;
};
+static void mv_completion_timer_callback(unsigned long unused)
+{
+ int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
+
+ printk(KERN_ERR MV_CESA
+ "completion timer expired (CESA %sactive), cleaning up.\n",
+ active ? "" : "in");
+
+ del_timer(&cpg->completion_timer);
+ writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
+ while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
+ printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n", __func__);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+}
+
+static void mv_setup_timer(void)
+{
+ setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
+ mod_timer(&cpg->completion_timer,
+ jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
+}
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
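The two helpers added above form a software watchdog for the CESA engine: mv_setup_timer() arms a one-shot timer just before the hardware is started, and the callback recovers the driver state if the completion interrupt never arrives. A minimal sketch of the same pattern, using the pre-4.15 timer API this driver targets (my_timer, my_timeout and my_start_hw are hypothetical names):

    static struct timer_list my_timer;

    static void my_timeout(unsigned long unused)
    {
            /* The completion IRQ never fired: reset hardware state
             * and restart the request queue here. */
    }

    static void my_start_hw(void)
    {
            /* Arm the watchdog first, then kick the hardware, so the
             * IRQ handler always finds a pending timer to cancel. */
            setup_timer(&my_timer, my_timeout, 0);
            mod_timer(&my_timer, jiffies + msecs_to_jiffies(500));
            /* ... write the GO bit ... */
    }

The same arm-before-GO ordering is what the two call sites below rely on.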
@@ -271,12 +301,8 @@ static void mv_process_current_q(int first_block)
sizeof(struct sec_accel_config));
/* GO */
+ mv_setup_timer();
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
- /*
- * XXX: add timer if the interrupt does not occur for some mystery
- * reason
- */
}
static void mv_crypto_algo_completion(void)
@@ -355,12 +381,8 @@ static void mv_process_hash_current(int first_block)
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
/* GO */
+ mv_setup_timer();
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
- /*
- * XXX: add timer if the interrupt does not occur for some mystery
- * reason
- */
}
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
@@ -404,6 +426,15 @@ out:
return rc;
}
+static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
+{
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+}
+
static void mv_hash_algo_completion(void)
{
struct ahash_request *req = ahash_request_cast(cpg->cur_req);
@@ -418,14 +449,12 @@ static void mv_hash_algo_completion(void)
memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
crypto_ahash_digestsize(crypto_ahash_reqtfm
(req)));
- } else
+ } else {
+ mv_save_digest_state(ctx);
mv_hash_final_fallback(req);
+ }
} else {
- ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
- ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
- ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
- ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
- ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+ mv_save_digest_state(ctx);
}
}
@@ -593,8 +622,8 @@ static int queue_manag(void *data)
}
if (async_req) {
- if (async_req->tfm->__crt_alg->cra_type !=
- &crypto_ahash_type) {
+ if (crypto_tfm_alg_type(async_req->tfm) !=
+ CRYPTO_ALG_TYPE_AHASH) {
struct ablkcipher_request *req =
ablkcipher_request_cast(async_req);
mv_start_new_crypt_req(req);
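This replaces a comparison against the internal &crypto_ahash_type with the crypto_tfm_alg_type() accessor. The accessor (as defined in include/linux/crypto.h of this era) simply masks the algorithm flags, which is why testing against CRYPTO_ALG_TYPE_AHASH is equivalent:

    static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
    {
            return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
    }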
@@ -814,7 +843,7 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
enum hash_op op, int count_add)
{
- const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *fallback_tfm = NULL;
struct crypto_shash *base_hash = NULL;
@@ -878,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -886,6 +915,10 @@ irqreturn_t crypto_int(int irq, void *priv)
if (!(val & SEC_INT_ACCEL0_DONE))
return IRQ_NONE;
+ if (!del_timer(&cpg->completion_timer)) {
+ printk(KERN_WARNING MV_CESA
+ "got an interrupt but no pending timer?\n");
+ }
val &= ~SEC_INT_ACCEL0_DONE;
writel(val, cpg->reg + FPGA_INT_STATUS);
writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
@@ -895,11 +928,12 @@ irqreturn_t crypto_int(int irq, void *priv)
return IRQ_HANDLED;
}
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = 16,
.cra_ctxsize = sizeof(struct mv_ctx),
.cra_alignmask = 0,
@@ -917,11 +951,12 @@ struct crypto_alg mv_aes_alg_ecb = {
},
};
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_ctx),
.cra_alignmask = 0,
@@ -940,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -953,7 +988,8 @@ struct ahash_alg mv_sha1_alg = {
.cra_driver_name = "mv-sha1",
.cra_priority = 300,
.cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
.cra_init = mv_cra_hash_sha1_init,
@@ -963,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
}
};
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
.init = mv_hash_init,
.update = mv_hash_update,
.final = mv_hash_final,
@@ -977,7 +1013,8 @@ struct ahash_alg mv_hmac_sha1_alg = {
.cra_driver_name = "mv-hmac-sha1",
.cra_priority = 300,
.cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
.cra_init = mv_cra_hash_hmac_sha1_init,
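CRYPTO_ALG_KERN_DRIVER_ONLY marks implementations that are only reachable through a kernel driver, i.e. that have no userspace instruction-set equivalent. A caller that wants to skip such implementations can require the flag to be clear via the type/mask arguments of the allocation helpers; a hypothetical example:

    /* Ask for a sha1 ahash whose KERN_DRIVER_ONLY flag is clear. */
    struct crypto_ahash *tfm =
            crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_KERN_DRIVER_ONLY);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);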
@@ -1028,7 +1065,10 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_reg;
}
- irq = platform_get_irq(pdev, 0);
+ if (pdev->dev.of_node)
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ else
+ irq = platform_get_irq(pdev, 0);
if (irq < 0 || irq == NO_IRQ) {
ret = irq;
goto err_unmap_sram;
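The of_node branch is needed because platform_get_irq() of this vintage did not resolve device-tree interrupts by itself. On current kernels it does (it falls back to of_irq_get() for OF devices), so the equivalent modern probe code would reduce to:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
            ret = irq;
            goto err_unmap_sram;
    }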
@@ -1044,11 +1084,18 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_sram;
}
- ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
cp);
if (ret)
goto err_thread;
+ /* Not all platforms can gate the clock, so it is not
+ * an error if the clock does not exist. */
+ cp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(cp->clk))
+ clk_prepare_enable(cp->clk);
+
+ writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
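The clk_get()/IS_ERR() pattern above deliberately treats a missing clock as non-fatal. Newer kernels (v5.1 and later) can express the same intent with devm_clk_get_optional(), which returns NULL instead of an error when no clock is described and drops the reference automatically on driver detach; a sketch under that assumption:

    cp->clk = devm_clk_get_optional(&pdev->dev, NULL);
    if (IS_ERR(cp->clk))
            return PTR_ERR(cp->clk);
    clk_prepare_enable(cp->clk);    /* a NULL clk is a harmless no-op */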
@@ -1086,6 +1133,10 @@ err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
free_irq(irq, cp);
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
err_thread:
kthread_stop(cp->queue_th);
err_unmap_sram:
@@ -1095,7 +1146,6 @@ err_unmap_reg:
err:
kfree(cp);
cpg = NULL;
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1114,17 +1164,30 @@ static int mv_remove(struct platform_device *pdev)
memset(cp->sram, 0, cp->sram_size);
iounmap(cp->sram);
iounmap(cp->reg);
+
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
+
kfree(cp);
cpg = NULL;
return 0;
}
+static const struct of_device_id mv_cesa_of_match_table[] = {
+ { .compatible = "marvell,orion-crypto", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
+
static struct platform_driver marvell_crypto = {
.probe = mv_probe,
.remove = mv_remove,
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
+ .of_match_table = mv_cesa_of_match_table,
},
};
MODULE_ALIAS("platform:mv_crypto");
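The of_device_id table together with MODULE_DEVICE_TABLE() lets the driver bind to, and be auto-loaded for, a device-tree node carrying the "marvell,orion-crypto" compatible. A hypothetical node, shown as a C comment since the unit address, reg window and interrupt number are board-specific (the SRAM would typically be a second resource):

    /*
     * crypto@30000 {
     *         compatible = "marvell,orion-crypto";
     *         reg = <0x30000 0x10000>;        // engine registers
     *         interrupts = <22>;              // board-specific IRQ
     *         status = "okay";
     * };
     */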