Diffstat (limited to 'drivers/crypto/ixp4xx_crypto.c')
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 338
1 file changed, 165 insertions(+), 173 deletions(-)
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 42a107fe923..f757a0f428b 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -17,6 +17,8 @@
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
@@ -27,8 +29,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <asm/arch/npe.h>
-#include <asm/arch/qmgr.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
#define MAX_KEYLEN 32
@@ -96,25 +98,44 @@
struct buffer_desc {
u32 phys_next;
+#ifdef __ARMEB__
u16 buf_len;
u16 pkt_len;
+#else
+ u16 pkt_len;
+ u16 buf_len;
+#endif
u32 phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
+ enum dma_data_direction dir;
};
struct crypt_ctl {
+#ifdef __ARMEB__
u8 mode; /* NPE_OP_* operation mode */
u8 init_len;
u16 reserved;
+#else
+ u16 reserved;
+ u8 init_len;
+ u8 mode; /* NPE_OP_* operation mode */
+#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
u32 icv_rev_aes; /* icv or rev aes */
u32 src_buf;
u32 dst_buf;
+#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
u16 crypt_offs; /* Cryption start offset */
u16 crypt_len; /* Cryption data length */
+#else
+ u16 auth_len; /* Authentication data length */
+ u16 auth_offs; /* Authentication start offset */
+ u16 crypt_len; /* Cryption data length */
+ u16 crypt_offs; /* Cryption start offset */
+#endif
u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
u32 crypto_ctx; /* NPE Crypto Param structure address */
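
The NPE coprocessor reads these descriptors as big-endian 32-bit words, so on a little-endian kernel (no __ARMEB__) each pair of u16 fields sharing a word must be declared in reverse order for the two halves to land where the NPE expects them. A minimal sketch of the idea (the struct names here are illustrative, not from the driver):

    /* Two u16 fields packed into one 32-bit word that the NPE
     * interprets big-endian (high half first):
     */
    struct npe_word_armeb { u16 hi; u16 lo; };  /* __ARMEB__: matches directly */
    struct npe_word_le    { u16 lo; u16 hi; };  /* LE CPU: order swapped so "hi"
                                                 * still occupies the high half */
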
@@ -132,14 +153,10 @@ struct crypt_ctl {
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
- unsigned src_nents;
- unsigned dst_nents;
};
struct aead_ctx {
struct buffer_desc *buffer;
- unsigned short assoc_nents;
- unsigned short src_nents;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
@@ -201,23 +218,9 @@ static dma_addr_t crypt_phys;
static int support_aes = 1;
-static void dev_release(struct device *dev)
-{
- return;
-}
-
#define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
- .name = DRIVER_NAME,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .coherent_dma_mask = DMA_32BIT_MASK,
- .release = dev_release,
- }
-};
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
@@ -246,10 +249,11 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
+ struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_KERNEL);
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
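
The switch from GFP_KERNEL to GFP_ATOMIC is needed because setup_crypt_desc() is called lazily from get_crypt_desc() with the descriptor spinlock held, i.e. in atomic context. Roughly, the call site looks like this (reconstructed from the unchanged parts of the driver, not from this hunk):

    static struct crypt_ctl *get_crypt_desc(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&desc_lock, flags);  /* atomic from here on */
            if (unlikely(!crypt_virt))
                    setup_crypt_desc();            /* so it must not sleep */
            ...
    }
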
@@ -312,7 +316,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
}
}
-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
{
while (buf) {
struct buffer_desc *buf1;
@@ -320,6 +324,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys)
buf1 = buf->next;
phys1 = buf->phys_next;
+ dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
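
With the new dir member in struct buffer_desc, each chain element now carries enough state to be DMA-unmapped as it is freed. After this hunk the whole function reads approximately as follows (surrounding lines reconstructed from context). Note that the address handed to dma_unmap_single() is buf->phys_next, while chainup_buffers() below stores the address it actually mapped in buf->phys_addr; that mismatch looks suspicious and is worth checking against the current mainline tree.

    static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
                               u32 phys)
    {
            while (buf) {
                    struct buffer_desc *buf1;
                    u32 phys1;

                    buf1 = buf->next;
                    phys1 = buf->phys_next;
                    dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
                    dma_pool_free(buffer_pool, buf, phys);
                    buf = buf1;
                    phys = phys1;
            }
    }
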
@@ -345,10 +350,10 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
static void one_packet(dma_addr_t phys)
{
+ struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
- enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
failed = phys & 0x1 ? -EBADMSG : 0;
phys &= ~0x3;
@@ -358,13 +363,8 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_PERFORM_AEAD: {
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
- dma_unmap_sg(dev, req->src, req_ctx->src_nents,
- DMA_BIDIRECTIONAL);
- free_buf_chain(req_ctx->buffer, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
@@ -374,16 +374,11 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_PERFORM_ABLK: {
struct ablkcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
- int nents;
+
if (req_ctx->dst) {
- nents = req_ctx->dst_nents;
- dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
- free_buf_chain(req_ctx->dst, crypt->dst_buf);
- src_direction = DMA_TO_DEVICE;
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
}
- nents = req_ctx->src_nents;
- dma_unmap_sg(dev, req->src, nents, src_direction);
- free_buf_chain(req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
}
@@ -425,9 +420,10 @@ static void crypto_done_action(unsigned long arg)
tasklet_schedule(&crypto_done_tasklet);
}
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
{
int ret = -ENODEV;
+ u32 msg[2] = { 0, 0 };
if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -439,9 +435,35 @@ static int init_ixp_crypto(void)
return ret;
if (!npe_running(npe_c)) {
- npe_load_firmware(npe_c, npe_name(npe_c), dev);
+ ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+ if (ret) {
+ return ret;
+ }
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+ } else {
+ if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
}
+ switch ((msg[1]>>16) & 0xff) {
+ case 3:
+ printk(KERN_WARNING "Firmware of %s lacks AES support\n",
+ npe_name(npe_c));
+ support_aes = 0;
+ break;
+ case 4:
+ case 5:
+ support_aes = 1;
+ break;
+ default:
+ printk(KERN_ERR "Firmware of %s lacks crypto support\n",
+ npe_name(npe_c));
+ return -ENODEV;
+ }
/* buffer_pool will also be used to sometimes store the hmac,
* so assure it is large enough
*/
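
init_ixp_crypto() now performs a STATUS_MSG handshake with the NPE and decodes the firmware ID word returned in msg[1]; the byte extracted below identifies the crypto feature set. A sketch of the decode (only the shift and mask come from the hunk; the helper name is made up):

    static inline u8 npe_fw_func(u32 id_word)
    {
            return (id_word >> 16) & 0xff;  /* firmware functionality field */
    }
    /* func 3      -> DES/3DES and hashing only, no AES (support_aes = 0)
     * func 4 or 5 -> full crypto including AES          (support_aes = 1)
     * otherwise   -> no crypto support, init fails with -ENODEV
     */
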
@@ -457,10 +479,12 @@ static int init_ixp_crypto(void)
if (!ctx_pool) {
goto err;
}
- ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+ ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+ "ixp_crypto:out", NULL);
if (ret)
goto err;
- ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+ ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+ "ixp_crypto:in", NULL);
if (ret) {
qmgr_release_queue(SEND_QID);
goto err;
@@ -470,6 +494,10 @@ static int init_ixp_crypto(void)
qmgr_enable_irq(RECV_QID);
return 0;
+
+npe_error:
+ printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+ ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
@@ -479,7 +507,7 @@ err:
return ret;
}
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
@@ -631,6 +659,9 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
/* write cfg word to cryptinfo */
cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+ cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
*(u32*)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
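
0xAA is the bit pattern 1010 1010, so the XOR toggles every other flag in the top byte of the config word, which the comment identifies as the NPE's "byte swap" flags, while leaving the algorithm and authsize fields untouched. A worked example with an illustrative value:

    /*   cfgword      = 0x5C00xxxx
     *   ^ 0xAA000000 = 0xF600xxxx
     * Only bits 31, 29, 27 and 25 flip; bits 0-23 are unchanged.
     */
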
@@ -707,12 +738,12 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
- case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
- case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
- case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ case 16: keylen_cfg = MOD_AES128; break;
+ case 24: keylen_cfg = MOD_AES192; break;
+ case 32: keylen_cfg = MOD_AES256; break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
@@ -748,56 +779,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return 0;
}
-static int count_sg(struct scatterlist *sg, int nbytes)
-{
- int i;
- for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
- nbytes -= sg->length;
- return i;
-}
-
-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
- unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
+static struct buffer_desc *chainup_buffers(struct device *dev,
+ struct scatterlist *sg, unsigned nbytes,
+ struct buffer_desc *buf, gfp_t flags,
+ enum dma_data_direction dir)
{
- int nents = 0;
-
- while (nbytes > 0) {
+ for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+ unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
u32 next_buf_phys;
- unsigned len = min(nbytes, sg_dma_len(sg));
+ void *ptr;
- nents++;
nbytes -= len;
- if (!buf->phys_addr) {
- buf->phys_addr = sg_dma_address(sg);
- buf->buf_len = len;
- buf->next = NULL;
- buf->phys_next = 0;
- goto next;
- }
- /* Two consecutive chunks on one page may be handled by the old
- * buffer descriptor, increased by the length of the new one
- */
- if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
- buf->buf_len += len;
- goto next;
- }
+ ptr = page_address(sg_page(sg)) + sg->offset;
next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
- if (!next_buf)
- return NULL;
+ if (!next_buf) {
+ buf = NULL;
+ break;
+ }
+ sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
buf->next = next_buf;
buf->phys_next = next_buf_phys;
-
buf = next_buf;
- buf->next = NULL;
- buf->phys_next = 0;
+
buf->phys_addr = sg_dma_address(sg);
buf->buf_len = len;
-next:
- if (nbytes > 0) {
- sg = sg_next(sg);
- }
+ buf->dir = dir;
}
+ buf->next = NULL;
+ buf->phys_next = 0;
return buf;
}
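
Callers now pass a throwaway on-stack descriptor (a "hook") as buf; chainup_buffers() links pool-allocated descriptors behind it, and the caller reads the head of the real chain back out of the hook. A minimal usage sketch, following the ablk_perform() hunk below:

    struct buffer_desc src_hook;    /* on-stack head, never handed to the NPE */
    struct buffer_desc *tail;

    tail = chainup_buffers(dev, req->src, nbytes, &src_hook,
                           flags, DMA_TO_DEVICE);
    if (!tail)
            goto free_buf_src;              /* req_ctx->src is still NULL here */
    req_ctx->src   = src_hook.next;         /* first real descriptor */
    crypt->src_buf = src_hook.phys_next;    /* its DMA handle, for the NPE */
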
@@ -858,12 +868,13 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
- int ret = -ENOMEM;
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
- unsigned int nbytes = req->nbytes, nents;
+ unsigned int nbytes = req->nbytes;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -876,7 +887,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt = get_crypt_desc();
if (!crypt)
- return ret;
+ return -ENOMEM;
crypt->data.ablk_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -889,53 +900,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
BUG_ON(ivsize && !req->info);
memcpy(crypt->iv, req->info, ivsize);
if (req->src != req->dst) {
+ struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
- nents = count_sg(req->dst, nbytes);
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(nents != 1);
- req_ctx->dst_nents = nents;
- dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
- req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
- if (!req_ctx->dst)
- goto unmap_sg_dest;
- req_ctx->dst->phys_addr = 0;
- if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
+ BUG_ON(req->dst->length < nbytes);
+ req_ctx->dst = NULL;
+ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ flags, DMA_FROM_DEVICE))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
} else {
req_ctx->dst = NULL;
- req_ctx->dst_nents = 0;
}
- nents = count_sg(req->src, nbytes);
- req_ctx->src_nents = nents;
- dma_map_sg(dev, req->src, nents, src_direction);
-
- req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
- if (!req_ctx->src)
- goto unmap_sg_src;
- req_ctx->src->phys_addr = 0;
- if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
+ req_ctx->src = NULL;
+ if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
+ flags, src_direction))
goto free_buf_src;
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
free_buf_src:
- free_buf_chain(req_ctx->src, crypt->src_buf);
-unmap_sg_src:
- dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
if (req->src != req->dst) {
- free_buf_chain(req_ctx->dst, crypt->dst_buf);
-unmap_sg_dest:
- dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
- DMA_FROM_DEVICE);
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
- return ret;
+ return -ENOMEM;
}
static int ablk_encrypt(struct ablkcipher_request *req)
@@ -983,7 +982,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
break;
offset += sg->length;
- sg = sg_next(sg);
+ sg = scatterwalk_sg_next(sg);
}
return (start + nbytes > offset + sg->length);
}
@@ -995,12 +994,12 @@ static int aead_perform(struct aead_request *req, int encrypt,
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned ivsize = crypto_aead_ivsize(tfm);
unsigned authsize = crypto_aead_authsize(tfm);
- int ret = -ENOMEM;
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
- unsigned int cryptlen, nents;
- struct buffer_desc *buf;
+ unsigned int cryptlen;
+ struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1020,7 +1019,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
crypt = get_crypt_desc();
if (!crypt)
- return ret;
+ return -ENOMEM;
crypt->data.aead_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -1036,34 +1035,30 @@ static int aead_perform(struct aead_request *req, int encrypt,
memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
- BUG(); /* -ENOTSUP because of my lazyness */
+ BUG(); /* -ENOTSUP because of my laziness */
}
- req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
- if (!req_ctx->buffer)
- goto out;
- req_ctx->buffer->phys_addr = 0;
/* ASSOC data */
- nents = count_sg(req->assoc, req->assoclen);
- req_ctx->assoc_nents = nents;
- dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
- buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
+ buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
+ flags, DMA_TO_DEVICE);
+ req_ctx->buffer = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
if (!buf)
- goto unmap_sg_assoc;
+ goto out;
/* IV */
sg_init_table(&req_ctx->ivlist, 1);
sg_set_buf(&req_ctx->ivlist, iv, ivsize);
- dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
- buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
+ buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
+ DMA_BIDIRECTIONAL);
if (!buf)
- goto unmap_sg_iv;
+ goto free_chain;
if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto unmap_sg_iv;
+ goto free_chain;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1073,33 +1068,28 @@ static int aead_perform(struct aead_request *req, int encrypt,
req_ctx->hmac_virt = NULL;
}
/* Crypt */
- nents = count_sg(req->src, cryptlen + authsize);
- req_ctx->src_nents = nents;
- dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
- buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
+ buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
+ DMA_BIDIRECTIONAL);
if (!buf)
- goto unmap_sg_src;
+ goto free_hmac_virt;
if (!req_ctx->hmac_virt) {
crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
}
+
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-unmap_sg_src:
- dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
+free_hmac_virt:
if (req_ctx->hmac_virt) {
dma_pool_free(buffer_pool, req_ctx->hmac_virt,
crypt->icv_rev_aes);
}
-unmap_sg_iv:
- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
-unmap_sg_assoc:
- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
- free_buf_chain(req_ctx->buffer, crypt->src_buf);
+free_chain:
+ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
crypt->ctl_flags = CTL_FLAG_UNUSED;
- return ret;
+ return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
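
When the authentication tag straddles scatterlist entries (hmac_inconsistent() above), the driver bounces it through the hmac_virt buffer with scatterwalk_map_and_copy(); the last argument selects the direction. As used in this driver:

    /* out = 0: gather from the scatterlist into the buffer (decrypt path:
     * save the received ICV before the NPE overwrites that region) */
    scatterwalk_map_and_copy(req_ctx->hmac_virt, req->src, cryptlen, authsize, 0);

    /* out = 1: scatter the buffer back into the scatterlist; this is what
     * finish_scattered_hmac() does on the encrypt path once the NPE is done */
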
@@ -1159,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
- struct crypto_authenc_key_param *param;
+ struct crypto_authenc_keys keys;
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
-
- param = RTA_DATA(rta);
- ctx->enckey_len = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
- if (keylen < ctx->enckey_len)
+ if (keys.enckeylen > sizeof(ctx->enckey))
goto badkey;
- ctx->authkey_len = keylen - ctx->enckey_len;
- memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
- memcpy(ctx->authkey, key, ctx->authkey_len);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
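
crypto_authenc_extractkeys() (declared in include/crypto/authenc.h) replaces the hand-rolled rtattr parsing: it validates the CRYPTO_AUTHENC_KEYA_PARAM attribute, reads the big-endian enckeylen, and returns pointers into the caller's key blob, so the driver only has to bounds-check and copy. A sketch of the resulting flow, using only names from the hunk:

    struct crypto_authenc_keys keys;

    if (crypto_authenc_extractkeys(&keys, key, keylen))
            goto badkey;    /* malformed blob: bad rtattr or keylen < enckeylen */
    /* keys.authkey/keys.authkeylen is the HMAC key (first in the blob),
     * keys.enckey/keys.enckeylen the cipher key (last enckeylen bytes). */
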
@@ -1418,20 +1400,28 @@ static struct ixp_alg ixp4xx_algos[] = {
} };
#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i,err ;
+ int i, err;
- if (platform_device_register(&pseudo_dev))
- return -ENODEV;
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
- err = init_ixp_crypto();
+ err = init_ixp_crypto(&pdev->dev);
if (err) {
- platform_device_unregister(&pseudo_dev);
+ platform_device_unregister(pdev);
return err;
}
for (i=0; i< num; i++) {
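
platform_device_register_full() builds the device from the platform_device_info and, because .dma_mask is set, also allocates and wires up dev.dma_mask and coherent_dma_mask, doing what the old static pseudo_dev did by hand. Note it returns an ERR_PTR-encoded pointer rather than NULL on failure, hence the IS_ERR/PTR_ERR handling above. DMA_BIT_MASK(32) is the modern spelling of the old DMA_32BIT_MASK:

    #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    /* DMA_BIT_MASK(32) == 0x00000000ffffffff */
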
@@ -1450,6 +1440,7 @@ static int __init ixp_module_init(void)
/* block ciphers */
cra->cra_type = &crypto_ablkcipher_type;
cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
if (!cra->cra_ablkcipher.setkey)
cra->cra_ablkcipher.setkey = ablk_setkey;
@@ -1462,6 +1453,7 @@ static int __init ixp_module_init(void)
/* authenc */
cra->cra_type = &crypto_aead_type;
cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
cra->cra_aead.setkey = aead_setkey;
cra->cra_aead.setauthsize = aead_setauthsize;
@@ -1493,8 +1485,8 @@ static void __exit ixp_module_exit(void)
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
- release_ixp_crypto();
- platform_device_unregister(&pseudo_dev);
+ release_ixp_crypto(&pdev->dev);
+ platform_device_unregister(pdev);
}
module_init(ixp_module_init);