Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r--  drivers/crypto/caam/caamalg.c | 585
1 file changed, 408 insertions(+), 177 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd7..c09ce1f040d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -66,10 +66,14 @@
 
 /* length of descriptors text */
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
 
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
 					 20 * CAAM_CMD_SZ)
@@ -86,6 +90,7 @@
 #else
 #define debug(format, arg...)
 #endif
+static struct list_head alg_list;
 
 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
@@ -103,27 +108,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 }
 
 /*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
-	u32 *jump_cmd;
-
-	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
  * For aead functions, read payload and write payload,
  * both of which are specified in req->src and req->dst
  */
 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 {
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 }
 
 /*
@@ -210,9 +202,197 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 	append_key_aead(desc, ctx, keys_fit_inline);
 
 	set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+	u32 *desc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/*
+	 * NULL encryption; IV is zero
+	 * assoclen = (assoclen + cryptlen) - cryptlen
+	 */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Prepare to read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	/* aead_decrypt shared descriptor */
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Prepare to read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH2 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/*
+	 * Insert a NOP here, since we need at least 4 instructions between
+	 * code patching the descriptor buffer and the location being patched.
+	 */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null dec shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	return 0;
 }
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -221,13 +401,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline = false;
-	u32 *key_jump_cmd, *jump_cmd;
 	u32 geniv, moveiv;
 	u32 *desc;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->authsize)
 		return 0;
 
+	/* NULL encryption / decryption */
+	if (!ctx->enckeylen)
+		return aead_null_set_sh_desc(aead);
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
@@ -252,7 +435,7 @@
 	/* assoclen + cryptlen = seqinlen - ivsize */
 	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
 
-	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
 	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
 
 	/* read assoc before reading payload */
@@ -290,35 +473,24 @@
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
-	desc = ctx->sh_desc_dec;
-
 	/* aead_decrypt shared descriptor */
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	append_key_aead(desc, ctx, keys_fit_inline);
+	desc = ctx->sh_desc_dec;
 
-	/* Only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	/* assoclen + cryptlen = seqinlen - ivsize */
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
 	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + tfm->ivsize)
+				ctx->authsize + tfm->ivsize);
 	/* assoclen = (assoclen + cryptlen) - cryptlen */
 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -339,7 +511,6 @@
 	/* Load ICV */
 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-	append_dec_shr_done(desc);
 
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
@@ -358,6 +529,7 @@
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
@@ -466,24 +638,10 @@ static int aead_setkey(struct crypto_aead *aead,
 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	unsigned int authkeylen;
-	unsigned int enckeylen;
+	struct crypto_authenc_keys keys;
 	int ret = 0;
 
-	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < enckeylen)
-		goto badkey;
-
-	authkeylen = keylen - enckeylen;
-
-	if (keylen > CAAM_MAX_KEY_SIZE)
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
 
 	/* Pick class 2 key length from algorithm submask */
@@ -491,25 +649,29 @@ static int aead_setkey(struct crypto_aead *aead,
 				      OP_ALG_ALGSEL_SHIFT] * 2;
 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
 
+	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+		goto badkey;
+
 #ifdef DEBUG
 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
-	       keylen, enckeylen, authkeylen);
+	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
+	       keys.authkeylen);
 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
 	       ctx->split_key_len, ctx->split_key_pad_len);
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
 
-	ret = gen_split_aead_key(ctx, key, authkeylen);
+	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
 	if (ret) {
 		goto badkey;
 	}
 
 	/* postpend encryption key to auth split key */
-	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
 
 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
-				       enckeylen, DMA_TO_DEVICE);
+				      keys.enckeylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
@@ -517,15 +679,15 @@ static int aead_setkey(struct crypto_aead *aead,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len + enckeylen, 1);
+		       ctx->split_key_pad_len + keys.enckeylen, 1);
 #endif
 
-	ctx->enckeylen = enckeylen;
+	ctx->enckeylen = keys.enckeylen;
 
 	ret = aead_set_sh_desc(aead);
 	if (ret) {
 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
-				 enckeylen, DMA_TO_DEVICE);
+				 keys.enckeylen, DMA_TO_DEVICE);
 	}
 
 	return ret;
@@ -541,7 +703,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
-	u32 *key_jump_cmd, *jump_cmd;
+	u32 *key_jump_cmd;
 	u32 *desc;
 
 #ifdef DEBUG
@@ -572,9 +734,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
 	/* Load iv */
 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 		   LDST_CLASS_1_CCB | tfm->ivsize);
@@ -612,11 +771,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			  ctx->enckeylen, CLASS_1 |
 			  KEY_DEST_CLASS_REG);
 
-	/* For aead, only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
 
 	/* load IV */
 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -628,9 +783,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
 
-	/* Wait for key to load before allowing propagating error */
-	append_dec_shr_done(desc);
-
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -769,11 +921,8 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct aead_edesc *)((char *)desc -
 		 offsetof(struct aead_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -817,14 +966,11 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -869,11 +1015,8 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
@@ -904,11 +1047,8 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
@@ -971,12 +1111,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 				 (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -997,7 +1134,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1047,8 +1185,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1065,7 +1203,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1129,7 +1268,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1284,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
 
@@ -1233,11 +1383,9 @@ static int aead_encrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1274,7 +1422,7 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1331,7 +1479,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1574,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
@@ -1461,6 +1608,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	return ret;
 }
 
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+	return aead_encrypt(&areq->areq);
+}
+
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
@@ -1650,6 +1802,124 @@ struct caam_alg_template {
 static struct caam_alg_template driver_algs[] = {
 	/* single-pass ipsec_esp descriptor */
 	{
+		.name = "authenc(hmac(md5),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	{
 		.name = "authenc(hmac(md5),cbc(aes))",
 		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
 		.blocksize = AES_BLOCK_SIZE,
@@ -2057,7 +2327,6 @@ static struct caam_alg_template driver_algs[] = {
 
 struct caam_crypto_alg {
 	struct list_head entry;
-	struct device *ctrldev;
 	int class1_alg_type;
 	int class2_alg_type;
 	int alg_op;
@@ -2070,14 +2339,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
 	struct caam_crypto_alg *caam_alg =
 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
-	int tgt_jr = atomic_inc_return(&priv->tfm_count);
 
-	/*
-	 * distribute tfms across job rings to ensure in-order
-	 * crypto request processing per tfm
-	 */
-	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
 
 	/* copy descriptor header template value */
 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2371,31 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
 				 desc_bytes(ctx->sh_desc_givenc),
 				 DMA_TO_DEVICE);
+	if (ctx->key_dma &&
+	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+		dma_unmap_single(ctx->jrdev, ctx->key_dma,
+				 ctx->enckeylen + ctx->split_key_pad_len,
+				 DMA_TO_DEVICE);
+
+	caam_jr_free(ctx->jrdev);
 }
 
 static void __exit caam_algapi_exit(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	struct caam_crypto_alg *t_alg, *n;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
+	if (!alg_list.next)
 		return;
 
-	ctrldev = &pdev->dev;
-	of_node_put(dev_node);
-	priv = dev_get_drvdata(ctrldev);
-
-	if (!priv->alg_list.next)
-		return;
-
-	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
 		crypto_unregister_alg(&t_alg->crypto_alg);
 		list_del(&t_alg->entry);
 		kfree(t_alg);
 	}
 }
 
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
-					      struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 					      *template)
 {
 	struct caam_crypto_alg *t_alg;
 
@@ -2149,7 +2403,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 
 	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
 	if (!t_alg) {
-		dev_err(ctrldev, "failed to allocate t_alg\n");
+		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2181,62 +2435,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 	t_alg->class1_alg_type = template->class1_alg_type;
 	t_alg->class2_alg_type = template->class2_alg_type;
 	t_alg->alg_op = template->alg_op;
-	t_alg->ctrldev = ctrldev;
 
 	return t_alg;
 }
 
 static int __init caam_algapi_init(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	int i = 0, err = 0;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	INIT_LIST_HEAD(&priv->alg_list);
-
-	atomic_set(&priv->tfm_count, -1);
+	INIT_LIST_HEAD(&alg_list);
 
 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		/* TODO: check if h/w supports alg */
 		struct caam_crypto_alg *t_alg;
 
-		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+		t_alg = caam_alg_alloc(&driver_algs[i]);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			dev_warn(ctrldev, "%s alg allocation failed\n",
-				 driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
 			continue;
 		}
 
 		err = crypto_register_alg(&t_alg->crypto_alg);
 		if (err) {
-			dev_warn(ctrldev, "%s alg registration failed\n",
+			pr_warn("%s alg registration failed\n",
 				t_alg->crypto_alg.cra_driver_name);
 			kfree(t_alg);
 		} else
-			list_add_tail(&t_alg->entry, &priv->alg_list);
+			list_add_tail(&t_alg->entry, &alg_list);
 	}
-	if (!list_empty(&priv->alg_list))
-		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
-			 (char *)of_get_property(dev_node, "compatible", NULL));
+	if (!list_empty(&alg_list))
+		pr_info("caam algorithms registered in /proc/crypto\n");
 
 	return err;
 }
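
For context on the aead_setkey() change above: the driver's open-coded rtattr walk is replaced by the generic crypto_authenc_extractkeys() helper from include/crypto/authenc.h, which validates an authenc() key blob (an rtattr header carrying enckeylen, followed by the authentication key and then the encryption key) and splits it into its two parts. A minimal kernel-C sketch of the call; example_parse_authenc_key() is a hypothetical wrapper, not part of the patch:

	#include <crypto/authenc.h>	/* crypto_authenc_extractkeys(), struct crypto_authenc_keys */
	#include <linux/printk.h>

	/* Hypothetical wrapper showing the helper the patch switches to. */
	static int example_parse_authenc_key(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		/* Validates the rtattr header and splits the key blob. */
		if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
			return -EINVAL;

		/* keys.authkey/authkeylen: HMAC key; keys.enckey/enckeylen: cipher key */
		pr_info("authkeylen=%u enckeylen=%u\n", keys.authkeylen,
			keys.enckeylen);
		return 0;
	}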
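Similarly, caam_cra_init()/caam_cra_exit() now take explicit ownership of a job ring via caam_jr_alloc()/caam_jr_free() (the driver's job-ring API in drivers/crypto/caam/jr.h) instead of indexing the controller's ring array, so all requests for one transform stay on one ring and per-tfm ordering is preserved. A sketch of the alloc/free pairing under that assumption; example_init()/example_exit() are hypothetical stand-ins for the tfm init/exit hooks:

	#include <linux/err.h>
	#include "jr.h"		/* caam_jr_alloc(), caam_jr_free() */

	static struct device *example_jrdev;	/* per-tfm context in the real driver */

	static int example_init(void)
	{
		/* Reserve a job ring for this transform's lifetime. */
		example_jrdev = caam_jr_alloc();
		if (IS_ERR(example_jrdev))
			return PTR_ERR(example_jrdev);
		return 0;
	}

	static void example_exit(void)
	{
		/* Every successful caam_jr_alloc() must be paired with a free. */
		caam_jr_free(example_jrdev);
	}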
