Diffstat (limited to 'drivers/crypto/mv_cesa.c')
-rw-r--r--	drivers/crypto/mv_cesa.c	230
1 file changed, 138 insertions(+), 92 deletions(-)
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e578df..29d0ee50490 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,13 +15,19 @@
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/clk.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
 
 #include "mv_cesa.h"
 
 #define MV_CESA	"MV-CESA:"
 #define MAX_HW_HASH_SIZE	0xFFFF
+#define MV_CESA_EXPIRE		500 /* msec */
 
 /*
  * STM:
@@ -78,12 +84,14 @@ struct crypto_priv {
 	void __iomem *reg;
 	void __iomem *sram;
 	int irq;
+	struct clk *clk;
 	struct task_struct *queue_th;
 
 	/* the lock protects queue and eng_st */
 	spinlock_t lock;
 	struct crypto_queue queue;
 	enum engine_status eng_st;
+	struct timer_list completion_timer;
 	struct crypto_async_request *cur_req;
 	struct req_progress p;
 	int max_req_size;
@@ -133,9 +141,31 @@ struct mv_req_hash_ctx {
 	int extra_bytes;	/* unprocessed bytes in buffer */
 	enum hash_op op;
 	int count_add;
-	struct scatterlist dummysg;
 };
 
+static void mv_completion_timer_callback(unsigned long unused)
+{
+	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
+
+	printk(KERN_ERR MV_CESA
+	       "completion timer expired (CESA %sactive), cleaning up.\n",
+	       active ? "" : "in");
+
+	del_timer(&cpg->completion_timer);
+	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
+	while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
+		printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
+	cpg->eng_st = ENGINE_W_DEQUEUE;
+	wake_up_process(cpg->queue_th);
+}
+
+static void mv_setup_timer(void)
+{
+	setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
+	mod_timer(&cpg->completion_timer,
+			jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
+}
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
 	struct crypto_aes_ctx gen_aes_key;
@@ -187,9 +217,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
 	int ret;
 	void *sbuf;
-	int copied = 0;
+	int copy_len;
 
-	while (1) {
+	while (len) {
 		if (!p->sg_src_left) {
 			ret = sg_miter_next(&p->src_sg_it);
 			BUG_ON(!ret);
@@ -199,19 +229,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
 		sbuf = p->src_sg_it.addr + p->src_start;
 
-		if (p->sg_src_left <= len - copied) {
-			memcpy(dbuf + copied, sbuf, p->sg_src_left);
-			copied += p->sg_src_left;
-			p->sg_src_left = 0;
-			if (copied >= len)
-				break;
-		} else {
-			int copy_len = len - copied;
-			memcpy(dbuf + copied, sbuf, copy_len);
-			p->src_start += copy_len;
-			p->sg_src_left -= copy_len;
-			break;
-		}
+		copy_len = min(p->sg_src_left, len);
+		memcpy(dbuf, sbuf, copy_len);
+
+		p->src_start += copy_len;
+		p->sg_src_left -= copy_len;
+
+		len -= copy_len;
+		dbuf += copy_len;
 	}
 }
 
@@ -275,14 +300,9 @@ static void mv_process_current_q(int first_block)
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 			sizeof(struct sec_accel_config));
 
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
+	mv_setup_timer();
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
-	/*
-	 * XXX: add timer if the interrupt does not occur for some mystery
-	 * reason
-	 */
 }
 
 static void mv_crypto_algo_completion(void)
@@ -302,6 +322,7 @@ static void mv_process_hash_current(int first_block)
 {
 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
 	struct req_progress *p = &cpg->p;
 	struct sec_accel_config op = { 0 };
 	int is_last;
@@ -314,6 +335,8 @@ static void mv_process_hash_current(int first_block)
 		break;
 	case COP_HMAC_SHA1:
 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
 		break;
 	}
 
@@ -345,18 +368,21 @@ static void mv_process_hash_current(int first_block)
 			op.config |= CFG_LAST_FRAG;
 		else
 			op.config |= CFG_MID_FRAG;
+
+		if (first_block) {
+			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+		}
 	}
 
 	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
 
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
+	mv_setup_timer();
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
-	/*
-	* XXX: add timer if the interrupt does not occur for some mystery
-	* reason
-	*/
 }
 
 static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
@@ -400,6 +426,15 @@ out:
 	return rc;
 }
 
+static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
+{
+	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+}
+
 static void mv_hash_algo_completion(void)
 {
 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
@@ -409,19 +444,17 @@ static void mv_hash_algo_completion(void)
 	if (ctx->extra_bytes)
 		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
 	sg_miter_stop(&cpg->p.src_sg_it);
 
-	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
 	if (likely(ctx->last_chunk)) {
 		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
 			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
 			       crypto_ahash_digestsize(crypto_ahash_reqtfm
 						       (req)));
-		} else
+		} else {
+			mv_save_digest_state(ctx);
 			mv_hash_final_fallback(req);
+		}
+	} else {
+		mv_save_digest_state(ctx);
 	}
 }
 
@@ -480,7 +513,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 	int i = 0;
 	size_t cur_len;
 
-	while (1) {
+	while (sl) {
 		cur_len = sl[i].length;
 		++i;
 		if (total_bytes > cur_len)
@@ -517,29 +550,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
 	struct req_progress *p = &cpg->p;
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	int num_sgs, hw_bytes, old_extra_bytes, rc;
 
 	cpg->cur_req = &req->base;
 	memset(p, 0, sizeof(struct req_progress));
 	hw_bytes = req->nbytes + ctx->extra_bytes;
 	old_extra_bytes = ctx->extra_bytes;
-	if (unlikely(ctx->extra_bytes)) {
-		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-		       ctx->extra_bytes);
-		p->crypt_len = ctx->extra_bytes;
-	}
-
-	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
-	if (unlikely(!ctx->first_hash)) {
-		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-	}
-
 	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
 	if (ctx->extra_bytes != 0
 	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +571,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 		p->complete = mv_hash_algo_completion;
 		p->process = mv_process_hash_current;
 
+		if (unlikely(old_extra_bytes)) {
+			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+			       old_extra_bytes);
+			p->crypt_len = old_extra_bytes;
+		}
+
 		mv_process_hash_current(1);
 	} else {
 		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -600,12 +622,10 @@ static int queue_manag(void *data)
 		}
 
 		if (async_req) {
-			if (async_req->tfm->__crt_alg->cra_type !=
-			    &crypto_ahash_type) {
+			if (crypto_tfm_alg_type(async_req->tfm) !=
+			    CRYPTO_ALG_TYPE_AHASH) {
 				struct ablkcipher_request *req =
-				    container_of(async_req,
-						 struct ablkcipher_request,
-						 base);
+				    ablkcipher_request_cast(async_req);
 				mv_start_new_crypt_req(req);
 			} else {
 				struct ahash_request *req =
@@ -722,19 +742,14 @@ static int mv_hash_update(struct ahash_request *req)
 static int mv_hash_final(struct ahash_request *req)
 {
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	/* dummy buffer of 4 bytes */
-	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
-	/* I think I'm allowed to do that... */
-	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+
+	ahash_request_set_crypt(req, NULL, req->result, 0);
 	mv_update_hash_req_ctx(ctx, 1, 0);
 	return mv_handle_req(&req->base);
 }
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-	if (!req->nbytes)
-		return mv_hash_final(req);
-
 	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
 	return mv_handle_req(&req->base);
 }
@@ -828,7 +843,7 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
 static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			    enum hash_op op, int count_add)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm = NULL;
 	struct crypto_shash *base_hash = NULL;
@@ -857,7 +872,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
@@ -892,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
 	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
 }
 
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
 {
 	u32 val;
 
@@ -900,6 +915,10 @@ irqreturn_t crypto_int(int irq, void *priv)
 	if (!(val & SEC_INT_ACCEL0_DONE))
 		return IRQ_NONE;
 
+	if (!del_timer(&cpg->completion_timer)) {
+		printk(KERN_WARNING MV_CESA
+		       "got an interrupt but no pending timer?\n");
+	}
 	val &= ~SEC_INT_ACCEL0_DONE;
 	writel(val, cpg->reg + FPGA_INT_STATUS);
 	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
@@ -909,11 +928,12 @@ irqreturn_t crypto_int(int irq, void *priv)
 	return IRQ_HANDLED;
 }
 
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
 	.cra_name		= "ecb(aes)",
 	.cra_driver_name	= "mv-ecb-aes",
 	.cra_priority	= 300,
-	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
 	.cra_blocksize	= 16,
 	.cra_ctxsize	= sizeof(struct mv_ctx),
 	.cra_alignmask	= 0,
@@ -931,11 +951,12 @@ struct crypto_alg mv_aes_alg_ecb = {
 	},
 };
 
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
 	.cra_name		= "cbc(aes)",
 	.cra_driver_name	= "mv-cbc-aes",
 	.cra_priority	= 300,
-	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
 	.cra_blocksize	= AES_BLOCK_SIZE,
 	.cra_ctxsize	= sizeof(struct mv_ctx),
 	.cra_alignmask	= 0,
@@ -954,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
 	},
 };
 
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
 	.init = mv_hash_init,
 	.update = mv_hash_update,
 	.final = mv_hash_final,
@@ -967,7 +988,8 @@ struct ahash_alg mv_sha1_alg = {
 			  .cra_driver_name = "mv-sha1",
 			  .cra_priority = 300,
 			  .cra_flags =
-			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
 			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
 			  .cra_init = mv_cra_hash_sha1_init,
@@ -977,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
 		 }
 };
 
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
 	.init = mv_hash_init,
 	.update = mv_hash_update,
 	.final = mv_hash_final,
@@ -991,7 +1013,8 @@ struct ahash_alg mv_hmac_sha1_alg = {
 			  .cra_driver_name = "mv-hmac-sha1",
 			  .cra_priority = 300,
 			  .cra_flags =
-			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
 			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
 			  .cra_init = mv_cra_hash_hmac_sha1_init,
@@ -1042,7 +1065,10 @@ static int mv_probe(struct platform_device *pdev)
 		goto err_unmap_reg;
 	}
 
-	irq = platform_get_irq(pdev, 0);
+	if (pdev->dev.of_node)
+		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	else
+		irq = platform_get_irq(pdev, 0);
 	if (irq < 0 || irq == NO_IRQ) {
 		ret = irq;
 		goto err_unmap_sram;
@@ -1058,21 +1084,35 @@ static int mv_probe(struct platform_device *pdev)
 		goto err_unmap_sram;
 	}
 
-	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
 			cp);
 	if (ret)
 		goto err_thread;
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exists. */
+	cp->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(cp->clk))
+		clk_prepare_enable(cp->clk);
+
+	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
 	ret = crypto_register_alg(&mv_aes_alg_ecb);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-ecb driver\n");
 		goto err_irq;
+	}
 
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-cbc driver\n");
 		goto err_unreg_ecb;
+	}
 
 	ret = crypto_register_ahash(&mv_sha1_alg);
 	if (ret == 0)
@@ -1093,6 +1133,10 @@ err_unreg_ecb:
 	crypto_unregister_alg(&mv_aes_alg_ecb);
 err_irq:
 	free_irq(irq, cp);
+	if (!IS_ERR(cp->clk)) {
+		clk_disable_unprepare(cp->clk);
+		clk_put(cp->clk);
+	}
 err_thread:
 	kthread_stop(cp->queue_th);
 err_unmap_sram:
@@ -1102,7 +1146,6 @@ err_unmap_reg:
 	iounmap(cp->reg);
 err:
 	kfree(cp);
 	cpg = NULL;
-	platform_set_drvdata(pdev, NULL);
 	return ret;
 }
@@ -1121,32 +1164,35 @@ static int mv_remove(struct platform_device *pdev)
 	memset(cp->sram, 0, cp->sram_size);
 	iounmap(cp->sram);
 	iounmap(cp->reg);
+
+	if (!IS_ERR(cp->clk)) {
+		clk_disable_unprepare(cp->clk);
+		clk_put(cp->clk);
+	}
+
 	kfree(cp);
 	cpg = NULL;
 	return 0;
 }
 
+static const struct of_device_id mv_cesa_of_match_table[] = {
+	{ .compatible = "marvell,orion-crypto", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
+
 static struct platform_driver marvell_crypto = {
 	.probe		= mv_probe,
 	.remove		= mv_remove,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "mv_crypto",
+		.of_match_table = mv_cesa_of_match_table,
 	},
 };
 MODULE_ALIAS("platform:mv_crypto");
 
-static int __init mv_crypto_init(void)
-{
-	return platform_driver_register(&marvell_crypto);
-}
-module_init(mv_crypto_init);
-
-static void __exit mv_crypto_exit(void)
-{
-	platform_driver_unregister(&marvell_crypto);
-}
-module_exit(mv_crypto_exit);
+module_platform_driver(marvell_crypto);
 
 MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
 MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
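
Note: the module_init()/module_exit() boilerplate deleted at the end of the diff is exactly what the module_platform_driver() helper generates. Roughly, the one-line replacement expands to the following (per the module_driver() macro in include/linux/platform_device.h; the generated function names are derived from the driver variable):

	static int __init marvell_crypto_init(void)
	{
		return platform_driver_register(&marvell_crypto);
	}
	module_init(marvell_crypto_init);

	static void __exit marvell_crypto_exit(void)
	{
		platform_driver_unregister(&marvell_crypto);
	}
	module_exit(marvell_crypto_exit);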
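The reworked copy_src_to_buf() loop is also easier to reason about: each pass copies min(bytes left in the current scatterlist entry, bytes still requested) and advances both cursors, so the loop terminates exactly when len reaches zero. A stand-alone user-space sketch of the same invariant (the chunk data and variable names here are illustrative, not from the driver; the caller must not request more bytes than the chunks hold, mirroring the BUG_ON in the real code):

	#include <stdio.h>
	#include <string.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* stand-ins for scatterlist entries */
		const char *chunks[] = { "abcd", "efghij", "kl" };
		char dbuf[16] = { 0 };
		char *d = dbuf;
		size_t len = 9;			/* total bytes requested */
		size_t src_left = 0, src_start = 0;
		unsigned int i = 0;

		while (len) {
			if (!src_left) {	/* like sg_miter_next() */
				src_left = strlen(chunks[i]);
				src_start = 0;
			}
			size_t copy_len = MIN(src_left, len);
			memcpy(d, chunks[i] + src_start, copy_len);
			src_start += copy_len;
			src_left -= copy_len;
			if (!src_left)
				i++;	/* current chunk drained, move on */
			len -= copy_len;
			d += copy_len;
		}
		printf("%s\n", dbuf);	/* prints "abcdefghi" */
		return 0;
	}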
