diff options
Diffstat (limited to 'crypto/blkcipher.c')
| -rw-r--r-- | crypto/blkcipher.c | 127 | 
1 file changed, 85 insertions, 42 deletions
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 7a7219266e3..0122bec3856 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -24,6 +24,8 @@  #include <linux/seq_file.h>  #include <linux/slab.h>  #include <linux/string.h> +#include <linux/cryptouser.h> +#include <net/netlink.h>  #include "internal.h" @@ -41,22 +43,22 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,  static inline void blkcipher_map_src(struct blkcipher_walk *walk)  { -	walk->src.virt.addr = scatterwalk_map(&walk->in, 0); +	walk->src.virt.addr = scatterwalk_map(&walk->in);  }  static inline void blkcipher_map_dst(struct blkcipher_walk *walk)  { -	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1); +	walk->dst.virt.addr = scatterwalk_map(&walk->out);  }  static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)  { -	scatterwalk_unmap(walk->src.virt.addr, 0); +	scatterwalk_unmap(walk->src.virt.addr);  }  static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)  { -	scatterwalk_unmap(walk->dst.virt.addr, 1); +	scatterwalk_unmap(walk->dst.virt.addr);  }  /* Get a spot of the specified length that does not straddle a page. 
@@ -68,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)  	return max(start, end_page);  } -static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, -					       struct blkcipher_walk *walk, +static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,  					       unsigned int bsize)  {  	u8 *addr; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm); -	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); +	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);  	addr = blkcipher_get_spot(addr, bsize);  	scatterwalk_copychunks(addr, &walk->out, bsize, 1);  	return bsize; @@ -103,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,  int blkcipher_walk_done(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk, int err)  { -	struct crypto_blkcipher *tfm = desc->tfm;  	unsigned int nbytes = 0;  	if (likely(err >= 0)) { @@ -115,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,  			err = -EINVAL;  			goto err;  		} else -			n = blkcipher_done_slow(tfm, walk, n); +			n = blkcipher_done_slow(walk, n);  		nbytes = walk->total - n;  		err = 0; @@ -134,7 +133,7 @@ err:  	}  	if (walk->iv != desc->info) -		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); +		memcpy(desc->info, walk->iv, walk->ivsize);  	if (walk->buffer != walk->page)  		kfree(walk->buffer);  	if (walk->page) @@ -224,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc,  static int blkcipher_walk_next(struct blkcipher_desc *desc,  			       struct blkcipher_walk *walk)  { -	struct crypto_blkcipher *tfm = desc->tfm; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);  	unsigned int bsize;  	unsigned int n;  	int err;  	n = walk->total; -	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { +	if (unlikely(n < walk->cipher_blocksize)) {  		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;  		return 
blkcipher_walk_done(desc, walk, -EINVAL);  	}  	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |  			 BLKCIPHER_WALK_DIFF); -	if (!scatterwalk_aligned(&walk->in, alignmask) || -	    !scatterwalk_aligned(&walk->out, alignmask)) { +	if (!scatterwalk_aligned(&walk->in, walk->alignmask) || +	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {  		walk->flags |= BLKCIPHER_WALK_COPY;  		if (!walk->page) {  			walk->page = (void *)__get_free_page(GFP_ATOMIC); @@ -248,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,  		}  	} -	bsize = min(walk->blocksize, n); +	bsize = min(walk->walk_blocksize, n);  	n = scatterwalk_clamp(&walk->in, n);  	n = scatterwalk_clamp(&walk->out, n);  	if (unlikely(n < bsize)) { -		err = blkcipher_next_slow(desc, walk, bsize, alignmask); +		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);  		goto set_phys_lowmem;  	} @@ -275,28 +272,26 @@ set_phys_lowmem:  	return err;  } -static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, -				    struct crypto_blkcipher *tfm, -				    unsigned int alignmask) +static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)  { -	unsigned bs = walk->blocksize; -	unsigned int ivsize = crypto_blkcipher_ivsize(tfm); -	unsigned aligned_bs = ALIGN(bs, alignmask + 1); -	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - -			    (alignmask + 1); +	unsigned bs = walk->walk_blocksize; +	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1); +	unsigned int size = aligned_bs * 2 + +			    walk->ivsize + max(aligned_bs, walk->ivsize) - +			    (walk->alignmask + 1);  	u8 *iv; -	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); +	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);  	walk->buffer = kmalloc(size, GFP_ATOMIC);  	if (!walk->buffer)  		return -ENOMEM; -	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); +	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);  	iv = blkcipher_get_spot(iv, 
bs) + aligned_bs;  	iv = blkcipher_get_spot(iv, bs) + aligned_bs; -	iv = blkcipher_get_spot(iv, ivsize); +	iv = blkcipher_get_spot(iv, walk->ivsize); -	walk->iv = memcpy(iv, walk->iv, ivsize); +	walk->iv = memcpy(iv, walk->iv, walk->ivsize);  	return 0;  } @@ -304,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk)  {  	walk->flags &= ~BLKCIPHER_WALK_PHYS; -	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->cipher_blocksize = walk->walk_blocksize; +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_virt); @@ -313,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc,  			struct blkcipher_walk *walk)  {  	walk->flags |= BLKCIPHER_WALK_PHYS; -	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->cipher_blocksize = walk->walk_blocksize; +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_phys); @@ -321,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);  static int blkcipher_walk_first(struct blkcipher_desc *desc,  				struct blkcipher_walk *walk)  { -	struct crypto_blkcipher *tfm = desc->tfm; -	unsigned int alignmask = crypto_blkcipher_alignmask(tfm); -  	if (WARN_ON_ONCE(in_irq()))  		return -EDEADLK; @@ -333,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,  	walk->buffer = NULL;  	walk->iv = desc->info; -	if (unlikely(((unsigned long)walk->iv & alignmask))) { -		int err = blkcipher_copy_iv(walk, tfm, alignmask); +	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { +		int err = blkcipher_copy_iv(walk);  		if (err)  			return err;  	} @@ -351,11 
+349,28 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,  			      unsigned int blocksize)  {  	walk->flags &= ~BLKCIPHER_WALK_PHYS; -	walk->blocksize = blocksize; +	walk->walk_blocksize = blocksize; +	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm); +	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); +	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);  	return blkcipher_walk_first(desc, walk);  }  EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, +				   struct blkcipher_walk *walk, +				   struct crypto_aead *tfm, +				   unsigned int blocksize) +{ +	walk->flags &= ~BLKCIPHER_WALK_PHYS; +	walk->walk_blocksize = blocksize; +	walk->cipher_blocksize = crypto_aead_blocksize(tfm); +	walk->ivsize = crypto_aead_ivsize(tfm); +	walk->alignmask = crypto_aead_alignmask(tfm); +	return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block); +  static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,  			    unsigned int keylen)  { @@ -492,6 +507,35 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)  		return crypto_init_blkcipher_ops_async(tfm);  } +#ifdef CONFIG_NET +static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ +	struct crypto_report_blkcipher rblkcipher; + +	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); +	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>", +		sizeof(rblkcipher.geniv)); + +	rblkcipher.blocksize = alg->cra_blocksize; +	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; +	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize; +	rblkcipher.ivsize = alg->cra_blkcipher.ivsize; + +	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, +		    sizeof(struct crypto_report_blkcipher), &rblkcipher)) +		goto nla_put_failure; +	return 0; + +nla_put_failure: +	return -EMSGSIZE; +} +#else +static int crypto_blkcipher_report(struct 
sk_buff *skb, struct crypto_alg *alg) +{ +	return -ENOSYS; +} +#endif +  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)  	__attribute__ ((unused));  static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) @@ -511,6 +555,7 @@ const struct crypto_type crypto_blkcipher_type = {  #ifdef CONFIG_PROC_FS  	.show = crypto_blkcipher_show,  #endif +	.report = crypto_blkcipher_report,  };  EXPORT_SYMBOL_GPL(crypto_blkcipher_type); @@ -556,18 +601,16 @@ struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,  	int err;  	algt = crypto_get_attr_type(tb); -	err = PTR_ERR(algt);  	if (IS_ERR(algt)) -		return ERR_PTR(err); +		return ERR_CAST(algt);  	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &  	    algt->mask)  		return ERR_PTR(-EINVAL);  	name = crypto_attr_alg_name(tb[1]); -	err = PTR_ERR(name);  	if (IS_ERR(name)) -		return ERR_PTR(err); +		return ERR_CAST(name);  	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);  	if (!inst)  | 
