Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/Kconfig52
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3846
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c2899
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h365
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h14
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.c114
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.h530
-rw-r--r--drivers/crypto/pka_4xx.c1333
-rw-r--r--drivers/crypto/pka_4xx_access.c201
-rw-r--r--drivers/crypto/pka_4xx_access.h86
-rw-r--r--drivers/crypto/pka_4xx_firmware.h515
12 files changed, 9876 insertions, 81 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1c..d0921a72d59 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -213,6 +213,15 @@ config CRYPTO_DEV_IXP4XX
help
Driver for the IXP4xx NPE crypto engine.
+config CRYPTO_DEV_PKA4xx
+ tristate "Support for the AMCC 4xx PKA"
+ depends on PPC
+ help
+ Select this option if you want to have support for the AMCC 4xx
+ PKA (public key accelerator).
+
+ To compile this driver as a module, choose M here: the module
+ will be called pka4xx.
+
config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
@@ -222,4 +231,47 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config SEC_HW_POLL
+ bool "Use HW polling instead of forced descriptor writes"
+ depends on CRYPTO_DEV_PPC4XX
+ default n
+
+config SEC_HW_RING_POLL_FREQ
+ int "Basic HW polling frequency"
+ depends on CRYPTO_DEV_PPC4XX && SEC_HW_POLL
+ default "2"
+ help
+ Interval at which the Packet Engine polls (reads) a segment
+ of the external PDR.
+
+config SEC_HW_POLL_RETRY_FREQ
+ int "HW polling retry interval"
+ depends on CRYPTO_DEV_PPC4XX && SEC_HW_POLL
+ default "1"
+ help
+ How long the Packet Engine waits between re-reads of an
+ invalid descriptor entry.
+
+config SEC_PD_OCM
+ bool "Security Packet Descriptors on OCM"
+ depends on CRYPTO_DEV_PPC4XX && 460EX
+ default n
+ help
+ This enables the Security Packet Descriptors to be allocated
+ on the OCM.
+
+config SEC_SA_OCM
+ bool "Security SA on OCM"
+ depends on CRYPTO_DEV_PPC4XX && 460EX
+ default n
+ help
+ This option enables the SA and State Record to be allocated
+ on the OCM.
+
+config CRYPTO_DEV_ASYNC_SAMPLE
+ tristate "Async crypto and hash sample driver using software algorithms"
+ select CRYPTO_HASH
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ This is a sample asynchronous crypto and hash device driver layered
+ over synchronous software crypto and hash algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f94..b9d434e28e4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -5,4 +5,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
+pka4xx-objs := pka_4xx_access.o pka_4xx.o
+obj-$(CONFIG_CRYPTO_DEV_PKA4xx) += pka4xx.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a33243c17b0..9d81d8f7571 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -20,18 +20,39 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
+#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/aead.h>
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <net/ip.h>
+
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+#define DEBUG_CRYPTESP 1
+
+#ifdef DEBUG_CRYPTESP
+#define ESP_PHD print_hex_dump
+#define ESP_PRINTK printk
+#else
+#define ESP_PHD(arg...)
+#define ESP_PRINTK(arg...)
+#endif
+
+
void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
@@ -56,11 +77,11 @@ void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
u32 cfb, u32 esn, u32 sn_mask, u32 mute,
u32 cp_pad, u32 cp_pay, u32 cp_hdr)
{
- sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
- sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -69,6 +90,423 @@ void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
+/** Table lookup for SA Hash Digest length and
+ * Hash Contents (based on Hash type)
+ */
+unsigned int crypto4xx_sa_hash_tbl[3][HASH_ALG_MAX_CNT] = {
+ /* Hash Contents */
+ { SA_HASH128_CONTENTS, SA_HASH160_CONTENTS, SA_HASH256_CONTENTS,
+ SA_HASH256_CONTENTS, SA_HASH512_CONTENTS, SA_HASH512_CONTENTS },
+ /* Digest len */
+ {4 * 4, 5 * 4, 7 * 4, 8 * 4, 12 * 4, 16 * 4},
+ /* SA Length */
+ { SA_HASH128_LEN, SA_HASH160_LEN, SA_HASH256_LEN, SA_HASH256_LEN,
+ SA_HASH512_LEN, SA_HASH512_LEN }
+};
+
+/** Table lookup for Hash Algorithms based on Hash type, used in
+ * crypto4xx_pre_compute_hmac()
+ */
+char *crypto4xx_hash_alg_map_tbl[HASH_ALG_MAX_CNT] = CRYPTO4XX_MAC_ALGS;
+
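+/*
+ * Build a scatterlist over a linear buffer, splitting entries at page
+ * boundaries so that no single entry crosses a page.
+ */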
+static void crypto4xx_sg_setbuf(unsigned char *data, size_t bufsize,
+ struct scatterlist *sg, int sg_num)
+{
+ int remainder_of_page;
+ int i = 0;
+
+ sg_init_table(sg, sg_num);
+ while (bufsize > 0 && i < sg_num) {
+ sg_set_buf(&sg[i], data, bufsize);
+ remainder_of_page = PAGE_SIZE - sg[i].offset;
+ if (bufsize > remainder_of_page) {
+ /* the buffer was split over multiple pages */
+ sg[i].length = remainder_of_page;
+ bufsize -= remainder_of_page;
+ data += remainder_of_page;
+ } else {
+ bufsize = 0;
+ }
+ i++;
+ }
+}
+
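+/*
+ * Pre-compute the HMAC inner and outer partial digests (RFC 2104:
+ * H(key ^ ipad) and H(key ^ opad)) in software and store them in the
+ * SA, so the engine can resume from that state for each request.
+ */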
+int crypto4xx_pre_compute_hmac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha,
+ unsigned char digs)
+{
+ u8 *ipad = NULL;
+ u8 *opad;
+ struct crypto_hash *child_hash = NULL;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ struct scatterlist asg[2];
+ struct crypto_tfm *child_tfm;
+ char *child_name = NULL;
+ int i, rc = 0;
+ int ds;
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ child_name = crypto4xx_hash_alg_map_tbl[ha];
+ child_hash = crypto_alloc_hash(child_name, 0, 0);
+ if (IS_ERR(child_hash)) {
+ rc = PTR_ERR(child_hash);
+ printk(KERN_ERR "failed to load "
+ "transform for %s error %d\n",
+ child_name, rc);
+ return rc;
+ }
+
+ ipad = kmalloc(bs * 2, GFP_KERNEL);
+ if (ipad == NULL) {
+ crypto_free_hash(child_hash);
+ return -ENOMEM;
+ }
+
+ opad = ipad + bs;
+ child_tfm = crypto_hash_tfm(child_hash);
+ ds = crypto_hash_digestsize(child_hash);
+ desc.tfm = child_hash;
+ desc.flags = 0;
+ if (keylen > bs) {
+ crypto4xx_sg_setbuf(key, keylen, asg, 2);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, asg, keylen);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_final(&desc, ipad);
+ keylen = ds;
+ } else {
+ memcpy(ipad, key, keylen);
+ }
+ memset(ipad + keylen, 0, bs-keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ sg_init_one(&sg[0], ipad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
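+ /*
+ * SHA-224/384 are truncations of SHA-256/512: the SA stores the
+ * full untruncated internal state, so copy the parent digest size.
+ */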
+ if (ha == SA_HASH_ALG_SHA224)
+ ds = SHA256_DIGEST_SIZE;
+ else if (ha == SA_HASH_ALG_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+
+ crypto_hash_partial(&desc, ipad);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), ipad, ds);
+
+ sg_init_one(&sg[0], opad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, opad);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx), opad, ds);
+
+err_alg_hash_key:
+ kfree(ipad);
+ crypto_free_hash(child_hash);
+ return rc;
+}
+
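+/*
+ * Pre-compute inner and outer partial digests for the SSL MAC and
+ * store them in the SA (same ipad/opad scheme as HMAC); padsize
+ * reflects SSLv3's 48-byte (MD5) / 40-byte (SHA-1) pads.
+ */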
+int crypto4xx_pre_compute_ssl_mac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha)
+
+{
+ u8 *ipad = NULL;
+ u8 *opad;
+ struct crypto_hash *child_hash = NULL;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ struct crypto_tfm *child_tfm;
+ unsigned char *digest = NULL;
+ int padsize = 0;
+ char *child_name = NULL;
+ int i, rc = 0;
+ int ds;
+
+ digest = kmalloc(bs, GFP_KERNEL);
+ if (digest == NULL) {
+ rc = -ENOMEM;
+ goto err_nomem;
+ }
+
+ if (ha == SA_HASH_ALG_MD5) {
+ child_name = "md5";
+ padsize = 48;
+ } else if (ha == SA_HASH_ALG_SHA1) {
+ child_name = "sha1";
+ padsize = 40;
+ } else {
+ rc = -EINVAL;
+ goto err_alg;
+ }
+
+ child_hash = crypto_alloc_hash(child_name, 0, 0);
+ if (IS_ERR(child_hash)) {
+ rc = PTR_ERR(child_hash);
+ printk(KERN_ERR
+ "failed to load transform for %s error %d\n",
+ child_name, rc);
+ goto err_alg;
+ }
+
+ child_tfm = crypto_hash_tfm(child_hash);
+ ds = crypto_hash_digestsize(child_hash);
+ desc.tfm = child_hash;
+ desc.flags = 0;
+
+ if (keylen > bs) {
+ sg_init_one(&sg[0], key, keylen);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, &sg[0], keylen);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_final(&desc, digest);
+ key = digest;
+ keylen = ds;
+ }
+
+ ipad = kmalloc(bs * 4, GFP_KERNEL);
+ if (ipad == NULL) {
+ rc = -ENOMEM;
+ goto err_alg_hash_key;
+ }
+
+ memcpy(ipad, key, keylen);
+ memset(ipad + keylen, 0, bs);
+ opad = ipad + bs;
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ sg_init_one(&sg[0], ipad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, digest);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_inner_digest(ctx), digest, ds);
+
+ sg_init_one(&sg[0], opad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, digest);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_outer_digest(ctx), digest, ds);
+
+err_alg_hash_key:
+ kfree(ipad);
+ crypto_free_hash(child_hash);
+err_alg:
+ kfree(digest);
+err_nomem:
+ return rc;
+}
+
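+/*
+ * Compute the GHASH subkey H = E_K(0^128) in software with ecb(aes)
+ * and load it into the SA's inner digest.
+ */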
+int crypto4xx_compute_gcm_hash_key_sw(struct crypto4xx_ctx *ctx,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_blkcipher *aes_tfm = NULL;
+ struct blkcipher_desc desc;
+ struct scatterlist sg[1];
+ char src[16];
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(aes_tfm)) {
+ printk(KERN_ERR "failed to load transform for %ld\n",
+ PTR_ERR(aes_tfm));
+ rc = PTR_ERR(aes_tfm);
+ return rc;
+ }
+ desc.tfm = aes_tfm;
+ desc.flags = 0;
+
+ memset(src, 0, 16);
+ rc = crypto_blkcipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "setkey() failed flags=%x\n",
+ crypto_blkcipher_get_flags(aes_tfm));
+ goto out;
+ }
+
+ sg_init_one(sg, src, 16);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, 16);
+ if (rc)
+ goto out;
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), src, 16);
+
+out:
+ crypto_free_blkcipher(aes_tfm);
+ return rc;
+}
+
+/**
+ * 3DES/DES Functions
+ *
+ */
+static int crypto4xx_setkey_3des(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm,
+ unsigned char fb)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+ }
+
+ if (keylen == DES_KEY_SIZE) {
+ u32 tmp[32];
+ rc = des_ekey(tmp, key);
+ if (unlikely(rc == 0) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+ }
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, keylen == 8 ? SA_DES_LEN : SA_3DES_LEN);
+ if (rc)
+ return rc;
+ /*
+ * The state record lives in the base ctx so the IV and hash
+ * result can be reused, and it does not need to be allocated
+ * for each incoming packet.
+ */
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ /* Setup SA */
+ ctx->direction = DIR_INBOUND;
+ ctx->hash_final = 0;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_DES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ fb, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ if (keylen == DES_KEY_SIZE) {
+ crypto4xx_memcpy_le(((struct dynamic_sa_des *) sa)->key,
+ key, keylen);
+ ((struct dynamic_sa_des *)sa)->ctrl.sa_contents =
+ SA_DES_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_DES;
+ } else {
+ crypto4xx_memcpy_le(((struct dynamic_sa_3des *) sa)->key,
+ key, keylen);
+ ((struct dynamic_sa_3des *)sa)->ctrl.sa_contents =
+ SA_3DES_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_3DES;
+ }
+
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_hash = 0;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_setkey_3des_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_8BIT_CFB);
+}
+
+int crypto4xx_setkey_3des_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_3des_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_CBC,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_3des_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -79,22 +517,54 @@ int crypto4xx_encrypt(struct ablkcipher_request *req)
ctx->pd_ctl = 0x1;
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, NULL, 0, req->info,
+ get_dynamic_sa_iv_size(ctx));
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info,
+ get_dynamic_sa_iv_size(ctx));
+}
+
+int crypto4xx_encrypt_ctr(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
ctx->hash_final = 0;
ctx->is_hash = 0;
- ctx->pd_ctl = 1;
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_OUTBOUND;
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, NULL, 0,
+ req->info,
+ crypto_ablkcipher_ivsize(ablkcipher));
+}
+
+int crypto4xx_decrypt_ctr(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0,
+ req->info,
+ crypto_ablkcipher_ivsize(ablkcipher));
}
/**
@@ -106,11 +576,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
unsigned char cm,
u8 fb)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
int rc;
+ ctx->dev = my_alg->dev;
+
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
crypto_ablkcipher_set_flags(cipher,
@@ -162,10 +636,17 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
-
+
return 0;
}
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
@@ -173,19 +654,716 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
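+
+/*
+ * CTR mode with an rfc3686-style key blob: the last 4 bytes of the
+ * key material are a per-SA nonce kept in the state record, and the
+ * 32-bit block counter starts at 1.
+ */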
+int crypto4xx_setkey_aes_ctr(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ u32 cnt = 1;
+ int rc;
+ u32 cm = CRYPTO_MODE_AES_CTR;
+
+ ctx->dev = my_alg->dev;
+
+ keylen -= 4;
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ if (keylen != AES_KEYSIZE_256 &&
+ keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->hash_final = 0;
+ ctx->ctr_aes = 1;
+ /* Setup SA */
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_ENCRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE, SA_NOT_COPY_PAD,
+ SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ ctx->direction = DIR_INBOUND;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ crypto4xx_memcpy_le(ctx->state_record, key + keylen, 4);
+ crypto4xx_memcpy_le(ctx->state_record + 12, (void *)&cnt, 4);
+
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_aes_icm(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_AES_ICM,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+/**
+ * AES-GCM Functions
+ */
+static inline int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 20:
+ case 24:
+ case 30:
+ case 32:
+ case 36:
+ return 0;
+ default:
+ printk(KERN_ERR "crypto4xx_setkey_aes_gcm: "
+ "ERROR keylen = 0x%08x\n", keylen);
+ return -EINVAL;
+ }
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ u32 cm = 4;
+
+ ctx->dev = my_alg->dev;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ printk(KERN_ERR "crypto4xx_setkey_aes_gcm:"
+ "ERROR keylen = 0x%08x\n", keylen);
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents = SA_AES_GCM_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ ctx->direction = DIR_INBOUND;
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_gcm = 1;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x11;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.copy_payload = 1;
+
+ sa->sa_command_1.bf.copy_pad = 0;
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.mutable_bit_proc = 1;
+ sa->sa_command_1.bf.seq_num_mask = 1;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ ctx->append_icv = 1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, crypto_aead_ivsize(aead));
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int len = req->cryptlen - crypto_aead_authsize(aead);
+
+ ctx->direction = DIR_INBOUND;
+ ctx->append_icv = 0;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, req->assoc, req->assoclen,
+ req->iv, crypto_aead_ivsize(aead));
+}
+
+int crypto4xx_givencrypt_aes_gcm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+int crypto4xx_givdecrypt_aes_gcm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+/**
+ * AES-CCM Functions
+ */
+int crypto4xx_setauthsize_aes(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct aead_tfm *tfm = crypto_aead_crt(cipher);
+
+ switch (authsize) {
+ case 8:
+ case 10:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tfm->authsize = authsize;
+ return 0;
+}
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ sa->sa_command_0.bf.digest_len = 0;
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ ctx->direction = DIR_INBOUND;
+ ctx->append_icv = 0;
+ ctx->is_gcm = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x11;
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ return 0;
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct dynamic_sa_ctl *sa;
+
+ ctx->direction = DIR_OUTBOUND;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ if (req->assoclen)
+ sa->sa_command_1.bf.hash_crypto_offset = req->assoclen >> 2;
+
+ sa->sa_command_0.bf.digest_len = (crypto_aead_authsize(aead) >> 2);
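+ /* iv[0] & 7 encodes L-1, the CCM counter/length field size minus 1 */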
+ if ((req->iv[0] & 7) == 1)
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+
+ ctx->append_icv = 1;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, 16);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct dynamic_sa_ctl *sa;
+
+ /* Support only counter field lengths of 2 and 4 bytes */
+ if ((req->iv[0] & 0x7) != 1 && (req->iv[0] & 0x7) != 3) {
+ printk(KERN_ERR "algorithm AES-CCM "
+ "unsupported counter length field %d\n",
+ (req->iv[0] & 0x7) + 1);
+ return -EINVAL;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_command_0.bf.digest_len = (crypto_aead_authsize(aead) >> 2);
+ if ((req->iv[0] & 7) == 1)
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ else
+ sa->sa_command_1.bf.crypto_mode9_8 = 0;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, 16);
+}
+
+int crypto4xx_givencrypt_aes_ccm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+int crypto4xx_givdecrypt_aes_ccm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
/**
- * HASH SHA1 Functions
+ * Kasumi Functions
+ *
+ */
+int crypto4xx_setkey_kasumi(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ u32 sa_len = 0;
+ int rc;
+
+ if (keylen != 16) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ printk(KERN_ERR "%s: keylen fail\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA - the SA is created here rather than in the common
+ * alg init function, because that function is shared by many
+ * algorithms and does not know the SA length, which is specific
+ * to each algorithm. Since setkey is called exactly once per
+ * encryption/decryption algorithm, it is safe to do this here.
+ */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ if (cm == CRYPTO_MODE_KASUMI)
+ sa_len = SA_KASUMI_LEN;
+ else if (cm == CRYPTO_MODE_KASUMI_f8)
+ sa_len = SA_KASUMI_F8_LEN;
+
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /* Setup SA - the SA is a resource shared across request
+ * operations. As the crypto algorithm and mode cannot change,
+ * it is safe to store them here. The SA control words are not
+ * used by the hardware (it is configured through the token
+ * instead), so we use them to store the selected software
+ * algorithm and mode.
+ */
+
+ if (cm == CRYPTO_MODE_KASUMI) {
+ sa->sa_contents = SA_KASUMI_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_KASUMI;
+ sa->sa_command_0.bf.hash_alg = SA_HASH_ALG_NULL;
+ sa->sa_command_0.bf.pad_type = 3; /* set to zero padding */
+ sa->sa_command_0.bf.opcode = 0;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+ } else {
+ sa->sa_contents = SA_KASUMI_F8_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_KASUMI;
+ sa->sa_command_0.bf.hash_alg = SA_HASH_ALG_NULL;
+ sa->sa_command_0.bf.pad_type = 3;
+ sa->sa_command_0.bf.load_iv = SA_LOAD_IV_FROM_STATE;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.mutable_bit_proc = 1;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa->sa_command_1.bf.sa_rev = 1;
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ ctx->is_hash = 0;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_setkey_kasumi_p(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_kasumi(cipher, key, keylen,
+ CRYPTO_MODE_KASUMI);
+}
+
+int crypto4xx_setkey_kasumi_f8(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_kasumi(cipher, key, keylen,
+ CRYPTO_MODE_KASUMI_f8);
+}
+
+/**
+ * Kasumi and Kasumi f8 operate on a number of bits, but the crypto
+ * engine only accepts source/destination lengths in bytes. The caller
+ * should round the bit count up to a byte count, and mask off the
+ * extra bits in the last byte of the result packet.
+ */
+int crypto4xx_encrypt_kasumi(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ctx->direction = DIR_OUTBOUND;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+/**
+ * Kasumi and Kasumi f8 operate on a number of bits, but the crypto
+ * engine only accepts source/destination lengths in bytes. The caller
+ * should round the bit count up to a byte count, and mask off the
+ * extra bits in the last byte of the result packet.
+ */
+int crypto4xx_decrypt_kasumi(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+/**
+ * Kasumi and Kasumi f8 operate on a number of bits, but the crypto
+ * engine only accepts source/destination lengths in bytes. The caller
+ * should round the bit count up to a byte count, and mask off the
+ * extra bits in the last byte of the result packet.
+ */
+int crypto4xx_encrypt_kasumi_f8(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info, 8);
+}
+
+/** Note:
+ * Kasumi and Kasumi f8 operate on a number of bits, but the crypto
+ * engine only accepts source/destination lengths in bytes. The caller
+ * should round the bit count up to a byte count, and mask off the
+ * extra bits in the last byte of the result packet.
+ */
+int crypto4xx_decrypt_kasumi_f8(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info, 8);
+}
+
+/**
+ * ARC4 Functions
+ *
+ */
+int crypto4xx_setkey_arc4(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_ARC4_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->arc4_state_record == NULL) {
+ rc = crypto4xx_alloc_arc4_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return -ENOMEM;
+ }
+ }
+ /* Setup SA */
+ ctx->sa_len = SA_ARC4_LEN;
+ ctx->init_arc4 = 1;
+ ctx->direction = DIR_INBOUND;
+
+ sa = ctx->sa_in;
+ memset(((struct dynamic_sa_arc4 *)sa)->key, 0, 16);
+
+ crypto4xx_memcpy_le(((struct dynamic_sa_arc4 *)sa)->key, key, keylen);
+ sa->sa_contents = SA_ARC4_CONTENTS;
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_ARC4, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_ENCRYPT,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE, SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD, SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_arc4_state_ptr(ctx),
+ (void *)&ctx->arc4_state_record_dma_addr, 4);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_arc4_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
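+ /*
+ * First request after setkey: pd_ctl 9 presumably tells the
+ * engine to initialize the ARC4 state record (assumed meaning
+ * of the extra control bit).
+ */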
+ if (ctx->init_arc4) {
+ ctx->init_arc4 = 0;
+ ctx->pd_ctl = 9;
+ } else {
+ ctx->pd_ctl = 0x1;
+ }
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_arc4_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ if (ctx->init_arc4) {
+ ctx->init_arc4 = 0;
+ ctx->pd_ctl = 9;
+ } else {
+ ctx->pd_ctl = 0x1;
+ }
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+/**
+ * Support MD5/SHA/HMAC Hashing Algorithms
+ *
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned int sa_len,
unsigned char ha,
unsigned char hm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
int rc;
ctx->dev = my_alg->dev;
@@ -200,6 +1378,7 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
if (rc)
return rc;
+
if (ctx->state_record_dma_addr == 0) {
crypto4xx_alloc_state_record(ctx);
if (!ctx->state_record_dma_addr) {
@@ -207,10 +1386,15 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
return -ENOMEM;
}
}
-
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ /*
+ * Setup hash algorithm and hash mode
+ */
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
@@ -221,13 +1405,12 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ sa->sa_contents = crypto4xx_sa_hash_tbl[0][ha];
ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
- /* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
return 0;
@@ -261,7 +1444,7 @@ int crypto4xx_hash_update(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src,
(struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ req->nbytes, NULL, 0, NULL, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -279,16 +1462,2627 @@ int crypto4xx_hash_digest(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src,
(struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ req->nbytes, NULL, 0, NULL, 0);
}
/**
* SHA1 Algorithm
*/
+
+int crypto4xx_md5_alg_init(struct crypto_tfm *tfm)
+{
+ return crypto4xx_hash_alg_init(tfm, SA_HASH128_LEN, SA_HASH_ALG_MD5,
+ SA_HASH_MODE_HASH);
+}
+
+int crypto4xx_hash_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm,
+ unsigned int max_keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ int ds = crypto_ahash_digestsize(hash);
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen > max_keylen) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ /* Create SA */
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC,
+ ha, SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, hm,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ sa->sa_contents = crypto4xx_sa_hash_tbl[0][ha];
+ ctx->direction = DIR_INBOUND;
+ memcpy((ctx->sa_in) + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ rc = crypto4xx_pre_compute_hmac(ctx, (void *)key, keylen, bs, ha, ds);
+ if (rc) {
+ printk(KERN_ERR "Hmac Initial Digest Calculation failed\n");
+ goto err;
+ }
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 1;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+int crypto4xx_md5_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen, SA_HASH128_LEN,
+ SA_HASH_ALG_MD5, SA_HASH_MODE_HMAC,
+ 256);
+}
+
+/**
+ * SHA1 and SHA2 Algorithm
+ *
+ */
+
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
{
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
+int crypto4xx_sha1_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen, SA_HASH160_LEN,
+ SA_HASH_ALG_SHA1, SA_HASH_MODE_HMAC,
+ 256);
+}
+
+int crypto4xx_sha2_alg_init(struct crypto_tfm *tfm)
+{
+ int ds = crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
+ u8 ha;
+
+ switch (ds) {
+ default:
+ case 256/8:
+ ha = SA_HASH_ALG_SHA256;
+ break;
+ case 224/8:
+ ha = SA_HASH_ALG_SHA224;
+ break;
+ case 512/8:
+ ha = SA_HASH_ALG_SHA512;
+ break;
+ case 384/8:
+ ha = SA_HASH_ALG_SHA384;
+ break;
+ }
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+
+ return crypto4xx_hash_alg_init(tfm,
+ crypto4xx_sa_hash_tbl[2][ha], ha, 0);
+}
+
+int crypto4xx_sha2_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int ds = crypto_ahash_digestsize(hash);
+ unsigned char ha;
+
+ switch (ds) {
+ default:
+ case 256/8:
+ ha = SA_HASH_ALG_SHA256;
+ break;
+ case 224/8:
+ ha = SA_HASH_ALG_SHA224;
+ break;
+ case 512/8:
+ ha = SA_HASH_ALG_SHA512;
+ break;
+ case 384/8:
+ ha = SA_HASH_ALG_SHA384;
+ break;
+ }
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen,
+ crypto4xx_sa_hash_tbl[2][ha],
+ ha,
+ SA_HASH_MODE_HMAC,
+ 512);
+}
+
+/**
+ * AES-XCBC-MAC Algorithm
+ *
+ */
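+/*
+ * Derive the three RFC 3566 subkeys by encrypting the constant blocks
+ * 0x01..01, 0x02..02 and 0x03..03 with the user key, and store them
+ * in the SA at the offsets the engine expects.
+ */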
+int crypto4xx_xcbc_digest(const unsigned char *key,
+ unsigned int keylen,
+ u8 *sa_hash, int bs)
+{
+ struct scatterlist sg[1];
+ struct crypto_blkcipher *aes_tfm = NULL;
+ struct blkcipher_desc desc;
+ int rc;
+ u8 *digest = NULL;
+
+ /* Load pre-computed key value into SA */
+ aes_tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ printk(KERN_ERR "failed to load transform"
+ " for ecb(aes) error %d\n", rc);
+ goto err_alg;
+ }
+ desc.tfm = aes_tfm;
+ desc.flags = 0;
+ rc = crypto_blkcipher_setkey(desc.tfm, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "failed to load key error %d\n", rc);
+ goto err_alg;
+ }
+ digest = kmalloc(16, GFP_KERNEL);
+ if (digest == NULL) {
+ rc = -ENOMEM;
+ goto err_alg;
+ }
+
+ memset(digest, 0x01, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ memset(digest, 0x02, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ sa_hash += 32;
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ memset(digest, 0x03, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ sa_hash += 16;
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ kfree(digest);
+ crypto_free_blkcipher(aes_tfm);
+
+ return 0;
+err_alg:
+ kfree(digest);
+ if (!IS_ERR(aes_tfm))
+ crypto_free_blkcipher(aes_tfm);
+ return rc;
+}
+
+int crypto4xx_xcbc_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct dynamic_sa_ctl *sa;
+ u8 *sa_hash;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != 128/8) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ /* Create SA */
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_XCBC_MAC_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_contents = SA_AES128_XCBC_MAC_CONTENTS;
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC,
+ SA_HASH_ALG_AES_XCBC_MAC_128,
+ SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->is_hash = 1;
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ sa_hash = (u8 *)(&(((struct dynamic_sa_aes128_xcbc_mac *)
+ ctx->sa_in)->inner_digest));
+ rc = crypto4xx_xcbc_digest(key, keylen, sa_hash, bs);
+ if (rc) {
+ printk(KERN_ERR "XCBC Digest Calculation Failed %d\n", rc);
+ goto err;
+ }
+
+ ctx->is_hash = 1;
+ ctx->hash_final = 1;
+ ctx->pd_ctl = 0x11;
+
+ ctx->direction = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+/**
+ * Kasumi F9 - Hash Algorithms
+ *
+ */
+int crypto4xx_kasumi_f9_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != 16) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_KASUMI_F9_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_KASUMI_f9,
+ SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ sa->sa_contents = SA_KASUMI_F9_CONTENTS;
+
+ ctx->direction = DIR_INBOUND;
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), key, keylen);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_hash = 1;
+ ctx->hash_final = 1;
+ ctx->pd_ctl = 0x11;
+ ctx->bypass = 4;
+
+ return 0;
+}
+
+int crypto4xx_kasumi_f9_digest(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist *src = req->src;
+ struct dynamic_sa_ctl *sa;
+ dma_addr_t addr;
+
+ /*
+ * A 16-byte header (count/fresh/direction/reserved) has been
+ * prepended to the plaintext, so the length must be adjusted.
+ * This is done so that tcrypt.c's hash test can be reused.
+ */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ addr = dma_map_page(NULL, sg_page(src), src->offset,
+ src->length, DMA_TO_DEVICE);
+ crypto4xx_memcpy_le((void *)sa +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ phys_to_virt(addr), 12);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ (struct scatterlist *)req->result,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+/** IPSEC Related Routines */
+
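+/*
+ * Parse an authenc()-style key blob (RTA-wrapped ESP parameters
+ * followed by the authentication and encryption keys) and program
+ * both the inbound and outbound ESP tunnel SAs.
+ */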
+int crypto4xx_setkey_esp_tunnel(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 cipher_alg,
+ u32 hashing,
+ u32 c_mode,
+ u32 sa_len,
+ u32 sa_contents,
+ u32 ds,
+ u32 bypass,
+ u32 hash_bs)
+
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+ ESP_PRINTK(KERN_INFO "%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",__FUNCTION__,
+ ctx->spi, ctx->seq, ctx->pad_block_size, ctx->encap_uhl);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+
+ ESP_PRINTK(KERN_INFO "%s: enckeylen = %d, authkeylen = %d\n",
+ __FUNCTION__, enckeylen, authkeylen);
+#if 0
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, authkeylen, false);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key+authkeylen, enckeylen, false);
+#endif
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ if (crypto4xx_alloc_sa(ctx, sa_len))
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ if (crypto4xx_alloc_state_record(ctx))
+ goto err_nomem_sr;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *)(ctx->sa_in);
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.hash_alg = hashing;
+ sa->sa_command_0.bf.gather = 0;
+ sa->sa_command_0.bf.save_hash_state = 1;
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_NULL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_HASH;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.hmac_muting = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_contents = sa_contents;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ if (crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen,
+ hash_bs, hashing, ds))
+ goto err_nomem_sr;
+
+ /*
+ * Now, setup command for ESP
+ */
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.save_hash_state = 0;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_0.bf.load_iv = 2;
+ sa->sa_command_0.bf.cipher_alg = cipher_alg;
+ sa->sa_command_0.bf.op_group = SA_OP_GROUP_PROTOCOL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ESP;
+
+ sa->sa_command_1.bf.hmac_muting = 0;
+
+ ESP_PRINTK(KERN_INFO "%s: keylen = %d, enckeylen = %d\n",
+ __FUNCTION__, keylen, enckeylen);
+
+ if (cipher_alg == SA_CIPHER_ALG_AES) {
+ if (enckeylen == 16)
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ else if (enckeylen == 24)
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_192;
+ else
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_256;
+ } else {
+ sa->sa_command_1.bf.key_len = 0;
+ }
+
+ sa->sa_command_1.bf.crypto_mode31 = c_mode >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = c_mode & 3;
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ sa->sa_command_1.bf.seq_num_mask = 1;
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key+authkeylen, enckeylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+ (void*)&(ctx->spi), 4);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+ (void*)&(ctx->seq), 4);
+
+ /*
+ * Setup sa for inbound processing
+ */
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_0.bf.load_iv = 1;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = 6;
+
+ /*
+ * Setup sa for outbound processing
+ */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len*4);
+ sa = (struct dynamic_sa_ctl *)(ctx->sa_out);
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.load_iv = 3;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ ctx->bypass = bypass;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = param->pad_block_size/4;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+badkey:
+ ESP_PRINTK(KERN_ERR "%s: badkey\n", __FUNCTION__);
+ return -EINVAL;
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 32, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 64, 0, 128);
+}
+
+/** DES and 3DES Related IPSEC Algorithms */
+int crypto4xx_setkey_tunnel_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_DES_MD5_SHA1_LEN,
+ SA_ESP_DES_MD5_SHA1_CONTENTS,
+ 16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_DES_MD5_SHA1_LEN,
+				SA_ESP_DES_MD5_SHA1_CONTENTS, 20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS,
+ 28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 32,
+ 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS,
+ 48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS,
+ 64, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_3DES_MD5_SHA1_LEN,
+ SA_ESP_3DES_MD5_SHA1_CONTENTS,
+ 16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_3DES_MD5_SHA1_LEN,
+				SA_ESP_3DES_MD5_SHA1_CONTENTS, 20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS,
+ 28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS,
+ 32, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+				SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS,
+ 48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS,
+ 64, 0, 128);
+}
+
+int crypto4xx_encrypt_esp_tunnel(struct aead_givcrypt_request *givreq,
+ struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx;
+#if 0
+ struct scatterlist *sg;
+ struct iphdr *iph;
+ void * daddr;
+ struct dynamic_sa_ctl *sa;
+#endif
+ if (givreq)
+ req = &givreq->areq;
+
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
+#if 0
+ sg = req->dst;
+ daddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ iph = (struct iphdr *)(daddr + sg->offset - 20);
+ ctx->next_hdr = (u32)(iph->protocol);
+ kunmap_atomic(daddr, KM_SOFTIRQ1);
+
+
+ ctx->next_hdr = 4;
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11 + (ctx->next_hdr << 8);
+#endif
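+	/*
+	 * pd_ctl packs the pad control into bits 31:24; 0x11 in the low
+	 * byte marks the descriptor host-ready with a final hash.  The
+	 * extra 0x400 here appears to be a tunnel-specific control bit.
+	 */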
+ ctx->hc_offset = 0;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x411;
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_decrypt_esp_tunnel(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+	ctx->direction = DIR_INBOUND;
+	ctx->hc_offset = 6;
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_encrypt_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_encrypt_esp_tunnel(NULL, req);
+}
+
+int crypto4xx_givencrypt_esp_cbc(struct aead_givcrypt_request *req)
+{
+ return crypto4xx_encrypt_esp_tunnel(req, NULL);
+}
+
+int crypto4xx_decrypt_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_decrypt_esp_tunnel(req);
+}
+
+/** Setkey Routine for IPSEC for Transport */
+int crypto4xx_setkey_esp(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen, u32 cipher_alg,
+ u32 hashing, u32 c_mode, u32 sa_len,
+ u32 sa_contents, u32 ds,
+ u32 bypass, u32 hash_bs)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+
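+	/*
+	 * The authenc key blob is an rtattr (CRYPTO_AUTHENC_KEYA_PARAM)
+	 * carrying the ESP parameters above, followed by the raw
+	 * authentication key and then the cipher key.
+	 */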
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+	ESP_PRINTK(KERN_INFO "%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",
+		   __FUNCTION__, ctx->spi, ctx->seq, ctx->pad_block_size,
+		   ctx->encap_uhl);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+
+ ESP_PRINTK(KERN_INFO "%s: enckeylen = %d, authkeylen = %d\n",
+ __FUNCTION__, enckeylen, authkeylen);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, authkeylen, false);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key+authkeylen, enckeylen, false);
+
+ /* Create SA */
+	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+		crypto4xx_free_sa(ctx);
+
+ crypto4xx_alloc_sa(ctx, sa_len);
+ if (!ctx->sa_in_dma_addr || !ctx->sa_out_dma_addr)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.hash_alg = hashing;
+ sa->sa_command_0.bf.gather = 0;
+ sa->sa_command_0.bf.save_hash_state = 1;
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_NULL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_HASH;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.hmac_muting = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_contents = sa_contents;
+
+ ctx->direction = DIR_INBOUND;
+ memcpy((ctx->sa_in) + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen,
+ hash_bs, hashing, ds);
+
+ /*
+ * Now, setup command for ESP
+ */
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.save_hash_state = 0;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_0.bf.load_iv = 2;
+ sa->sa_command_0.bf.cipher_alg = cipher_alg;
+ sa->sa_command_0.bf.op_group = SA_OP_GROUP_PROTOCOL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ESP;
+
+ sa->sa_command_1.bf.hmac_muting = 0;
+
+ ESP_PRINTK(KERN_INFO "%s: keylen = %d, enckeylen = %d\n",
+ __FUNCTION__, keylen, enckeylen);
+
+	if (cipher_alg == SA_CIPHER_ALG_AES) {
+		if (enckeylen == 16) {
+			ESP_PRINTK(KERN_INFO "%s: AES 128\n", __FUNCTION__);
+			sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+		} else if (enckeylen == 24) {
+			ESP_PRINTK(KERN_INFO "%s: AES 192\n", __FUNCTION__);
+			sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_192;
+		} else {
+			ESP_PRINTK(KERN_INFO "%s: AES 256\n", __FUNCTION__);
+			sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_256;
+		}
+	} else {
+		sa->sa_command_1.bf.key_len = 0;
+	}
+
+ sa->sa_command_1.bf.crypto_mode31 = c_mode >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = c_mode & 3;
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ sa->sa_command_1.bf.seq_num_mask = 1;
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+	crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+			    key + authkeylen, enckeylen);
+
+	memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+	       (void *)&ctx->spi, 4);
+	memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+	       (void *)&ctx->seq, 4);
+
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = 6;
+
+ sa->sa_command_0.bf.load_iv = 1;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.load_iv = 3;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ ctx->bypass = bypass;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = param->pad_block_size/4;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+badkey:
+	ESP_PRINTK(KERN_ERR "%s: badkey\n", __FUNCTION__);
+ return -EINVAL;
+}
+
+/** Encrypt/Decrypt Routines for IPSEC for Transport */
+int crypto4xx_encrypt_esp(struct aead_givcrypt_request *givreq,
+ struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *sg;
+ struct iphdr *iph;
+	void *saddr;
+
+ if (givreq)
+ req = &givreq->areq;
+
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
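+	/*
+	 * Peek at the IP header in place: remember the original payload
+	 * protocol for the ESP trailer and overwrite it with ESP.
+	 */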
+ sg = req->src;
+ saddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ iph = (struct iphdr *)(saddr + sg->offset);
+ ctx->next_hdr = (u32)(iph->protocol);
+	iph->protocol = 0x32;	/* IPPROTO_ESP */
+
+#if 1
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)saddr+ sg->offset, sg->length, false);
+
+ ESP_PRINTK(KERN_INFO "%s: next_hdr = %d\n",__FUNCTION__, ctx->next_hdr);
+#endif
+ kunmap_atomic(saddr, KM_SOFTIRQ1);
+
+ ctx->hc_offset = 0;
+ ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_OUTBOUND;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_decrypt_esp(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+#if 1
+ struct scatterlist *sg;
+	void *saddr;
+
+ sg = req->src;
+ saddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)saddr+sg->offset, sg->length, false);
+ kunmap_atomic(saddr, KM_SOFTIRQ1);
+#endif
+ ctx->hc_offset = 0;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/** AES Transport Related Algorithms for IPSEC */
+int crypto4xx_setkey_transport_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_AES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+/** DES Transport Related Algorithms for IPSEC */
+int crypto4xx_setkey_transport_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_DES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+/** 3DES Transport Related Algorithms for IPSEC */
+int crypto4xx_setkey_transport_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_MD5, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA1, 1,
+ SA_ESP_MD5_SHA1_LEN,
+ SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA224, 1,
+ SA_ESP_SHA256_LEN,
+ SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA256, 1,
+ SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA384, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+ return crypto4xx_setkey_esp(cipher, key, keylen,
+ SA_CIPHER_ALG_3DES,
+ SA_HASH_ALG_SHA512, 1,
+ SA_ESP_SHA512_LEN,
+ SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+int crypto4xx_encrypt_transport_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_encrypt_esp(NULL, req);
+}
+
+int crypto4xx_givencrypt_transport_esp_cbc(struct aead_givcrypt_request *req)
+{
+ return crypto4xx_encrypt_esp(req, NULL);
+}
+
+int crypto4xx_decrypt_transport_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_decrypt_esp(req);
+}
+
+/** Setkey and Encrypt/Decrypt Functions for Macsec */
+int crypto4xx_setkey_macsec_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+ struct offload_param {
+ __be32 spi;
+ __be32 seq;
+ __be32 iv_h;
+ __be32 iv_l;
+ } *param;
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA */
+	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+		crypto4xx_free_sa(ctx);
+
+	crypto4xx_alloc_sa(ctx, SA_MACSEC_GCM_LEN);
+ if (!ctx->sa_in_dma_addr || !ctx->sa_out_dma_addr)
+ goto err_nomem;
+
+ ctx->direction = DIR_INBOUND;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ param = (struct offload_param *) key;
+ key += sizeof(struct offload_param);
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_MACSEC_GCM_CONTENTS;
+
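+	/*
+	 * The final 16 bytes of the supplied key material are not part
+	 * of the AES key (layout assumed from the offload_param handling
+	 * above); the remainder feeds both the cipher key and the
+	 * software GHASH key derivation below.
+	 */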
+ keylen -= 16;
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc)
+ goto err_nomem_sr;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_EXT_PROT_MACSEC,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ sa->sa_command_1.bf.byte_offset = 1;
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ /*
+ * Setup sa for inbound processing
+ */
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[0] = 0x00000001;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[1] = 0x00000000;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[2] = 0x00000000;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[3] = 0x00000000;
+
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->spi = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq = cpu_to_be32(param->seq);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->iv[0] = cpu_to_le32(param->iv_h);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->iv[1] = cpu_to_le32(param->iv_l);
+
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+
+ /*
+ * Setup sa for outbound processing
+ */
+ ctx->direction = DIR_OUTBOUND;
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_EXT_PROT_MACSEC,
+ DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->bypass = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+
+}
+
+int crypto4xx_encrypt_macsec(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ctx->direction = DIR_OUTBOUND;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_decrypt_macsec(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto4xx_device *dev = ctx->dev;
+
+ dev->macsec_decrypt_num++;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_INBOUND;
+ ctx->bypass = 0;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/** DTLS/SSL/TLS Related Algorithms */
+static int crypto4xx_setkey_dtls(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ authkeylen = keylen - enckeylen;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_DTLS_LEN);
+ if (rc)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_DTLS_CONTENTS;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&(ctx->state_record_dma_addr), 4);
+
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_DTLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen, bs, hash_alg,
+ authkeylen);
+ crypto4xx_memcpy_le((u32 *)((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+ if (cipher_alg == SA_CIPHER_ALG_AES)
+ sa->sa_command_1.bf.key_len = enckeylen >> 3;
+
+ ((struct dynamic_sa_dtls *) sa)->spi.w = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_dtls *) sa)->seq[1] = cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_dtls *) sa)->seq[0] = cpu_to_be32(param->seq_l);
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+
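+	/*
+	 * Clone the inbound SA and flip it for outbound use; only the
+	 * direction, IV handling and copy flags differ.
+	 */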
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_GEN_IV,
+				 SA_HEADER_PROC, hash_alg, cipher_alg,
+				 SA_PAD_TYPE_DTLS, SA_OP_GROUP_EXTEND_PROTOCOL,
+				 opcode, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD, SA_COPY_HDR);
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+err_nomem:
+ return -ENOMEM;
+
+badkey:
+	printk(KERN_ERR "%s: badkey\n", __FUNCTION__);
+ return -EINVAL;
+}
+
+int crypto4xx_setkey_dtls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_DTLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_DTLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_DTLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_DTLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_DTLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+/** DTLS/SSL/TLS Related Encrypt/Decrypt Algorithms */
+
+int crypto4xx_encrypt_dtls(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_decrypt_dtls(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int tls;
+/** Setkey Functions for SSL/TLS */
+
+static int crypto4xx_setkey_ssl_tls(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_DTLS_LEN);
+ if (rc)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ ctx->direction = DIR_INBOUND;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_contents = SA_DTLS_CONTENTS;
+
+	((struct dynamic_sa_dtls *)sa)->state_ptr =
+				(u32)ctx->state_record_dma_addr;
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_STATE,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
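+	/*
+	 * SSLv3 uses its own keyed MAC construction rather than HMAC:
+	 * load the digests directly (SHA-1) or precompute the SSL MAC
+	 * pads (MD5); TLS falls through to the regular HMAC precompute.
+	 */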
+ if (opcode == SA_OPCODE_SSL) {
+ if (hash_alg == SA_HASH_ALG_SHA1) {
+ memcpy(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx),
+ (void*)key, authkeylen);
+ memcpy(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ (void*)key, authkeylen);
+ } else if (hash_alg == SA_HASH_ALG_MD5) {
+ crypto4xx_pre_compute_ssl_mac(ctx, (void*)key,
+ authkeylen, bs,hash_alg);
+ }
+ } else {
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen, bs,
+ hash_alg, authkeylen);
+ sa->sa_command_1.bf.hmac_muting = 1;
+ }
+
+ ((struct dynamic_sa_dtls *) sa)->spi.w = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_dtls *) sa)->seq[1] = cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_dtls *) sa)->seq[0] = cpu_to_be32(param->seq_l);
+
+ crypto4xx_memcpy_le((u32 *) ((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+ if (cipher_alg == SA_CIPHER_ALG_AES)
+ sa->sa_command_1.bf.key_len = enckeylen >> 3;
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_OUTBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_STATE,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp, opcode,
+ DIR_OUTBOUND);
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+err_nomem:
+ return -ENOMEM;
+badkey:
+	printk(KERN_ERR "%s: badkey\n", __FUNCTION__);
+ return -EINVAL;
+}
+
+extern int ssl_arc4;
+
+int crypto4xx_setkey_ssl_tls_arc4(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ int i, j = 0, k = 0;
+ u8 a;
+	u8 arc4_key[256];
+
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+ /* Create SA */
+	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+		crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_SSL_ARC4_LEN);
+ if (rc)
+ return rc;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ if (ctx->arc4_state_record == NULL) {
+ rc = crypto4xx_alloc_arc4_state_record(ctx);
+ if (rc)
+ goto err_nomem_arc4;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ ctx->direction = DIR_INBOUND;
+ ctx->init_arc4 = 1;
+ sa->sa_contents = SA_SSL_ARC4_CONTENTS;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&(ctx->state_record_dma_addr), 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.arc4_stateful = 1;
+ sa->sa_command_1.bf.save_arc4_state = 1;
+
+ if (opcode == SA_OPCODE_SSL) {
+ if (hash_alg == SA_HASH_ALG_SHA1) {
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx),
+ (void*) key, authkeylen);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ (void*) key, authkeylen);
+ } else if (hash_alg == SA_HASH_ALG_MD5) {
+ crypto4xx_pre_compute_ssl_mac(ctx, (void*)key,
+ authkeylen, bs, hash_alg);
+ }
+ sa->sa_command_1.bf.hmac_muting = 0;
+ } else {
+ crypto4xx_pre_compute_hmac(ctx, (void*) key, authkeylen, bs,
+ hash_alg, authkeylen);
+ sa->sa_command_1.bf.hmac_muting = 1;
+ }
+
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->arc4_state_ptr =
+ (u32)ctx->arc4_state_record_dma_addr;
+
+ /* Setting Key */
+ crypto4xx_memcpy_le((u32 *) ((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+	memcpy(arc4_key, key + authkeylen, enckeylen);
+
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->spi.w =
+ cpu_to_le32(param->spi);
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->seq[1] =
+ cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->seq[0] =
+ cpu_to_be32(param->seq_l);
+ /* For stateful mode we need to initialize the ARC4 state record */
+ ((struct dynamic_sa_ssl_tls_arc4 *) ctx->sa_in)->ij.i = 1;
+ ((struct dynamic_sa_ssl_tls_arc4 *) ctx->sa_in)->ij.j = 0;
+
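+	/*
+	 * Standard RC4 key-scheduling: start from the identity
+	 * permutation and swap entries driven by the key bytes, so the
+	 * engine can run stateful ARC4 from this state record.
+	 */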
+ for (i = 0; i < 256; i++)
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ a = ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i];
+ j = (j + arc4_key[k] + a) & 0xff;
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i] =
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[j];
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[j] = a;
+ if (++k >= enckeylen)
+ k = 0;
+ }
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+ ctx->direction = DIR_OUTBOUND;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+
+ /* Setup SA command for outbound process */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ sa->sa_command_1.bf.arc4_stateful = 1;
+ sa->sa_command_1.bf.save_arc4_state = 1;
+
+ return 0;
+
+err_nomem_arc4:
+ crypto4xx_free_state_record(ctx);
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+ return -ENOMEM;
+badkey:
+	printk(KERN_ERR "%s: badkey\n", __FUNCTION__);
+	return -EINVAL;
+}
+
+int crypto4xx_setkey_ssl_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4,
+ SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_ssl_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/** Encrypt/Decrypt Functions for SSL-AES */
+int crypto4xx_encrypt_ssl_aes(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, AES_BLOCK_SIZE);
+}
+
+int crypto4xx_decrypt_ssl_aes(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, AES_BLOCK_SIZE);
+}
+
+/** Encrypt/Decrypt Functions for SSL-DES */
+int crypto4xx_encrypt_ssl_des(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, DES_BLOCK_SIZE);
+}
+
+int crypto4xx_decrypt_ssl_des(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, DES_BLOCK_SIZE);
+}
+
+/** Encrypt/Decrypt Functions for SSL-NULL */
+int crypto4xx_encrypt_ssl_null(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_decrypt_ssl_null(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+int ssl_arc4 = 0;
+/** Encrypt/Decrypt Functions for SSL-ARC4 */
+int crypto4xx_encrypt_ssl_arc4(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int ret = 0;
+
+ ctx->direction = DIR_OUTBOUND;
+ ssl_arc4 = 1;
+ ret = crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+ ssl_arc4 = 0;
+ return ret;
+}
+
+int crypto4xx_decrypt_ssl_arc4(struct aead_request *req)
+{
+ int ret;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ssl_arc4 = 1;
+ ctx->direction = DIR_INBOUND;
+
+ ret = crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+ ssl_arc4 = 0;
+ return ret;
+}
+
+/** TLS and TLS V1 Setkey Functions */
+int crypto4xx_setkey_tls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ tls = 1;
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_tls1_1_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_transport_esp_rfc4106_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+	u32 rc;
+	struct esp_authenc_param {
+		__be32 spi;
+		__be32 seq;
+		__be16 pad_block_size;
+		__be16 encap_uhl;
+		struct crypto_authenc_key_param authenc_param;
+	} *param;
+	unsigned int enckeylen;
+
+	ctx->dev = my_alg->dev;
+
+	ESP_PRINTK(KERN_INFO "%s: keylen = %d\n", __FUNCTION__, keylen);
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, keylen, false);
+
+	if (!RTA_OK(rta, keylen)) {
+		printk(KERN_ERR "%s: badkey\n", __FUNCTION__);
+		goto badkey;
+	}
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
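+	/*
+	 * Four bytes of extra material follow the authenc params
+	 * (presumably the RFC 4106 nonce salt); skip them so only the
+	 * raw AES key remains.
+	 */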
+ key += 4;
+	keylen = keylen - sizeof(*param) - 4;
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+	ESP_PRINTK(KERN_INFO "%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",
+		   __FUNCTION__, ctx->spi, ctx->seq, ctx->pad_block_size,
+		   ctx->encap_uhl);
+
+ /* Create SA */
+	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+		crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_ESP_GCM_LEN);
+ if (rc)
+ return rc;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
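+	/*
+	 * RFC 4106 GCM-ESP: AES-CTR for the cipher with GHASH as the
+	 * authenticator; H is derived in software from the AES key.
+	 */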
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->direction = DIR_INBOUND;
+ sa->sa_contents = SA_ESP_GCM_CONTENTS;
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc)
+ goto err_nomem_sr;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+ (void*)&(ctx->spi), 4);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+ (void*)&(ctx->seq), 4);
+
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_ESP,
+ DIR_INBOUND);
+
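+	/* digest_len appears to count 32-bit words: 3 gives a 12-byte ICV */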
+ sa->sa_command_0.bf.digest_len = 3;
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ /*
+ * Setup sa for outbound processing
+ */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+				 SA_LOAD_IV_GEN_IV,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_ESP,
+ DIR_OUTBOUND);
+
+ sa->sa_command_0.bf.digest_len = 3;
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+
+ ctx->bypass = 5;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+	ESP_PRINTK(KERN_INFO "%s: pad_block_size = %d\n", __FUNCTION__,
+		   ctx->pad_block_size);
+	ctx->pad_ctl = 0x08;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+ return -ENOMEM;
+badkey:
+	ESP_PRINTK(KERN_ERR "%s: badkey\n", __FUNCTION__);
+	return -EINVAL;
+}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 46e899ac924..8c00e30e9e6 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -28,17 +28,40 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
+#include <asm/ppc4xx_ocm.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#define PPC4XX_SEC_VERSION_STR "0.5"
-
+void my_dump_Data(const u_char *dptr, u_int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		printk("0x%02x, ", dptr[i]);
+		if ((i + 1) % 8 == 0)
+			printk(" ");
+		if ((i + 1) % 16 == 0)
+			printk("\n");
+	}
+	printk("\n");
+}
+static struct proc_dir_entry *proc_crypto4xx;
+struct proc_dir_entry *entry;
/**
* PPC4xx Crypto Engine Initialization Routine
*/
@@ -72,16 +95,24 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
- writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
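+	/* Seed the PRNG before enabling automatic generation */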
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
ring_size.w = 0;
ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
ring_size.bf.ring_size = PPC4XX_NUM_PD;
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
ring_ctrl.w = 0;
+
+ if (dev->core_dev->revb_ver == 1) {
+#ifdef CONFIG_SEC_HW_POLL
+ ring_ctrl.bf.ring_retry_divisor = CONFIG_SEC_HW_POLL_RETRY_FREQ;
+ ring_ctrl.bf.ring_poll_divisor = CONFIG_SEC_HW_RING_POLL_FREQ;
+#endif
+ }
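+	/*
+	 * On Rev B engines, CONFIG_SEC_HW_POLL lets the packet engine
+	 * poll the descriptor ring itself; the divisors above set the
+	 * poll and retry rates.
+	 */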
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
@@ -95,11 +126,19 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+
+#ifdef CONFIG_SEC_PD_OCM
+ writel((dev->pdr_ocm_addr >> 32), dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+ writel((dev->pdr_ocm_addr >> 32), dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+#else
writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+#endif
writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+
writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+
writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
/* un reset pe,sg and pdr */
@@ -112,13 +151,108 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
/*clear all pending interrupt*/
writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
- writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
- writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+
+ if (dev->core_dev->revb_ver == 1) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ /* For RevB, 460EX and 460ExR Rev B */
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+}
+
+void crypto4xx_dump_regs(struct crypto4xx_core_device *core_dev)
+{
+ u32 reg_dump;
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_CTRL_STAT);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PD_CTRL_STAT = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SOURCE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_Source_REG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_DEST);
+	printk("crypto4xx_dump_regs: CRYPTO4XX_Des_REG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA);
+	printk("crypto4xx_dump_regs: CRYPTO4XX_SA_REG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PE_DMA_CFG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_SIZE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_RING_SIZE = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_CTRL);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_RING_CTRL = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_IO_THRESHOLD = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PE_DMA_STAT);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PE_DMA_STAT= 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PDR_BASE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PDR_BASE = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_PTR);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_PTR = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA_CMD_0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SA_CMD_0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA_CMD_1);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SA_CMD_1 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SPI);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SPI = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SEQ_NUM0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SEQ_NUM_0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SEQ_NUM1);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SEQ_NUM_1 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 4);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 4 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base +CRYPTO4XX_STATE_IV + 8);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 8 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 12);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 12 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_HASH_BYTE_CNT_0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_HASH_BYTE_CNT_0 = 0x%08x\n", reg_dump);
+
}
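+
+/*
+ * A minimal sketch (not part of this patch) of where the register dump
+ * above is useful: dumping engine state when a Rev B timeout interrupt
+ * is flagged. CRYPTO4XX_INT_UNMASK_STAT and PPC4XX_TMO_ERR_INT are the
+ * names used elsewhere in this file.
+ */
+static void crypto4xx_dump_on_timeout(struct crypto4xx_core_device *core_dev)
+{
+	u32 stat = readl(core_dev->dev->ce_base + CRYPTO4XX_INT_UNMASK_STAT);
+
+	if (stat & PPC4XX_TMO_ERR_INT)
+		crypto4xx_dump_regs(core_dev);
+}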
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
+#ifdef CONFIG_SEC_SA_OCM
+	ctx->sa_out = ocm_alloc(&ctx->sa_out_ocm_addr, size * 4, 4,
+			OCM_NON_CACHED, "sec_sa_out");
+	if (ctx->sa_out == NULL)
+		return -ENOMEM;
+	ctx->sa_out_dma_addr = (u32)ctx->sa_out_ocm_addr;
+	printk(KERN_DEBUG "OCM allocation done for SA out at 0x%llx\n",
+	       (unsigned long long)ctx->sa_out_ocm_addr);
+ ctx->sa_in = ocm_alloc(&ctx->sa_in_ocm_addr, size * 4, 4,
+ OCM_NON_CACHED, "sec_sa_in");
+ if (ctx->sa_in == NULL) {
+ ocm_free(ctx->sa_out);
+ return -ENOMEM;
+ }
+ ctx->sa_in_dma_addr = (u32)ctx->sa_in_ocm_addr;
+ memset(ctx->sa_in, 0, size * 4);
+ memset(ctx->sa_out, 0, size * 4);
+ ctx->sa_len = size;
+ return 0;
+#endif
ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_in_dma_addr, GFP_ATOMIC);
if (ctx->sa_in == NULL)
@@ -142,6 +276,14 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ ocm_free(ctx->sa_out);
+ ocm_free(ctx->sa_in);
+ ctx->sa_in_dma_addr = 0;
+ ctx->sa_out_dma_addr = 0;
+ ctx->sa_len = 0;
+ return;
+#endif
if (ctx->sa_in != NULL)
dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
@@ -151,16 +293,28 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
ctx->sa_in_dma_addr = 0;
ctx->sa_out_dma_addr = 0;
+ ctx->sa_in = NULL;
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ ctx->state_record = ocm_alloc(&ctx->state_record_ocm_addr,
+ sizeof(struct sa_state_record), 4,
+ OCM_NON_CACHED, "sec_state_record");
+ if (ctx->state_record == NULL)
+ return -ENOMEM;
+ ctx->state_record_dma_addr = (u32)ctx->state_record_ocm_addr;
+#else
ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
sizeof(struct sa_state_record),
&ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
+
+ if (!ctx->state_record_dma_addr || !ctx->state_record)
return -ENOMEM;
+#endif
memset(ctx->state_record, 0, sizeof(struct sa_state_record));
return 0;
@@ -168,14 +322,82 @@ u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ if (ctx->state_record != NULL)
+ ocm_free(ctx->state_record);
+#else
if (ctx->state_record != NULL)
dma_free_coherent(ctx->dev->core_dev->device,
sizeof(struct sa_state_record),
ctx->state_record,
ctx->state_record_dma_addr);
+#endif
+ ctx->state_record = NULL;
ctx->state_record_dma_addr = 0;
}
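+
+/*
+ * A minimal usage sketch (not part of this patch) showing how a setkey
+ * path would typically pair the SA and state-record allocators above;
+ * the "sa_len_words" argument is a placeholder for the per-algorithm SA
+ * length in 32-bit words.
+ */
+static int crypto4xx_alloc_sa_and_sr(struct crypto4xx_ctx *ctx,
+				     u32 sa_len_words)
+{
+	int rc;
+
+	rc = crypto4xx_alloc_sa(ctx, sa_len_words);
+	if (rc)
+		return rc;
+
+	rc = crypto4xx_alloc_state_record(ctx);
+	if (rc) {
+		crypto4xx_free_sa(ctx);
+		return rc;
+	}
+
+	return 0;
+}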
+u32 crypto4xx_alloc_arc4_state_record(struct crypto4xx_ctx *ctx)
+{
+#ifdef CONFIG_SEC_SA_OCM
+ ctx->arc4_state_record = ocm_alloc(&ctx->arc4_state_ocm_addr,
+ sizeof(struct arc4_sr), 4,
+ OCM_NON_CACHED, "sec_state_arc4_record");
+ if (ctx->arc4_state_record == NULL)
+ return -ENOMEM;
+ ctx->arc4_state_record_dma_addr = (u32)ctx->arc4_state_ocm_addr;
+#else
+ ctx->arc4_state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
+ sizeof(struct arc4_sr),
+ /* &dma_addr */ &ctx->arc4_state_record_dma_addr,
+ GFP_ATOMIC);
+
+ if (!ctx->arc4_state_record_dma_addr)
+ return -ENOMEM;
+#endif
+ memset(ctx->arc4_state_record, 0, sizeof(struct arc4_sr));
+
+ return 0;
+}
+
+void crypto4xx_free_arc4_state_record(struct crypto4xx_ctx *ctx)
+{
+ if (ctx->arc4_state_record != NULL) {
+#ifdef CONFIG_SEC_SA_OCM
+ ocm_free(ctx->arc4_state_record);
+
+#else
+ dma_free_coherent(ctx->dev->core_dev->device,
+ sizeof(struct arc4_sr),
+ ctx->arc4_state_record,
+ ctx->arc4_state_record_dma_addr);
+#endif
+ }
+ ctx->arc4_state_record = NULL;
+ ctx->arc4_state_record_dma_addr = 0;
+}
+
+static int datalen_check;
+static int crypto4xx_device_read_procmem(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ struct crypto4xx_core_device *core_dev = (struct crypto4xx_core_device *)data;
+ int len = 0;
+ u32 ring_ctrl_val;
+
+ ring_ctrl_val = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_CTRL);
+
+ len += sprintf(buffer + len, "ring_ctrl_val = 0x%08x\n", ring_ctrl_val);
+ len += sprintf(buffer + len,
+ "Crypto4xx Controller on AMCC PPC 460EX Canyonlands Board\n");
+ len += sprintf(buffer + len,
+ "%u packets received for packetsize = %d\n", core_dev->dev->pkt_cnt,
+ datalen_check);
+ len += sprintf(buffer + len,
+ "%lld interrupts received\n", core_dev->irq_cnt);
+ *eof = 1;
+ return len;
+}
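+
+/*
+ * A minimal sketch (not part of this patch) of exposing the read_proc
+ * handler above via the legacy procfs API its signature matches; the
+ * "driver/crypto4xx" entry name is an assumption for illustration.
+ * Requires <linux/proc_fs.h>.
+ */
+static int crypto4xx_register_procmem(struct crypto4xx_core_device *core_dev)
+{
+	struct proc_dir_entry *ent;
+
+	ent = create_proc_read_entry("driver/crypto4xx", 0444, NULL,
+				     crypto4xx_device_read_procmem, core_dev);
+	return ent ? 0 : -ENOMEM;
+}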
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -185,20 +407,37 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
struct pd_uinfo *pd_uinfo;
+
+#ifdef CONFIG_SEC_PD_OCM
+	int pd_size = sizeof(struct ce_pd) * PPC4XX_NUM_PD;
+
+	dev->pdr = ocm_alloc(&dev->pdr_ocm_addr, pd_size, 4,
+			OCM_NON_CACHED, "sec_pd");
+	if (dev->pdr == NULL) {
+		printk(KERN_ERR "PD allocation failed on OCM\n");
+		return -ENOMEM;
+	}
+	dev->pdr_pa = (u32)dev->pdr_ocm_addr;
+	printk(KERN_INFO "Security OCM allocation done for packet descriptors: 0x%llx,\n"
+	       "virtual OCM address: %p, OCM allocation size: %d\n",
+	       (unsigned long long)dev->pdr_ocm_addr, dev->pdr, pd_size);
+#else
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
if (!dev->pdr)
return -ENOMEM;
-
+#endif
dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
GFP_KERNEL);
if (!dev->pdr_uinfo) {
+#ifndef CONFIG_SEC_PD_OCM
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
+#else
+		ocm_free(dev->pdr);
+#endif
		return -ENOMEM;
}
memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
@@ -233,10 +472,14 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
+#ifndef CONFIG_SEC_PD_OCM
if (dev->pdr != NULL)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+#else
+ ocm_free(dev->pdr);
+#endif
if (dev->shadow_sa_pool)
dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
@@ -245,6 +488,7 @@ static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+ dev->pkt_cnt = 0;
kfree(dev->pdr_uinfo);
}
@@ -526,7 +770,7 @@ static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
(*idx)++;
return 0;
- }
+ }
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -589,9 +833,25 @@ static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
struct sa_state_record *state_record =
(struct sa_state_record *) pd_uinfo->sr_va;
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ switch (sa->sa_command_0.bf.hash_alg) {
+ case SA_HASH_ALG_KASUMI_f9:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *)state_record->save_digest, 8);
+ break;
+ case SA_HASH_ALG_AES_XCBC_MAC_128:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *) state_record->save_digest, 16);
+ break;
+ case SA_HASH_ALG_MD5:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *) state_record->save_digest,
+ SA_HASH_ALG_MD5_DIGEST_SIZE);
+ break;
+ default:
+ memcpy((void *)pd_uinfo->dest_va,
+ state_record->save_digest,
+ crypto4xx_sa_hash_tbl[1][sa->sa_command_0.bf.hash_alg]);
+ break;
}
return 0;
@@ -616,6 +876,57 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
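+
+/*
+ * Copy the saved ICV (authentication tag) out of the state record and
+ * append it to the destination scatterlist, @offset bytes into the
+ * payload, walking sg entries in case the @len tag bytes straddle an
+ * sg boundary.
+ */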
+void crypto4xx_append_icv_to_end(struct crypto4xx_device *dev,
+ struct scatterlist *dst,
+ struct sa_state_record *sr,
+ u32 offset,
+ u32 len)
+{
+ struct scatterlist *sg;
+ int i = 0;
+ u32 cp_len;
+ dma_addr_t addr;
+
+ sg = &dst[i];
+ while (len) {
+		while (sg->length < offset) {
+			offset -= sg->length;
+			i++;
+			sg = &dst[i];
+		}
+		/* At this point the ICV may lie within this sg
+		 * or start in the next one.
+		 */
+ if (sg->length > offset) {
+ /* icv should be in middle of this sg */
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset,
+ sg->length, DMA_TO_DEVICE);
+ cp_len = (sg->length-offset >= len) ? len :
+ sg->length-offset;
+ len -= cp_len;
+ crypto4xx_memcpy_le((u32 *)(phys_to_virt(addr)
+ + offset),
+ (u8 *)sr->save_digest, cp_len);
+ } else {
+			/* start at the beginning of the next sg */
+			i++;
+			sg = &dst[i];
+ offset = 0;
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset,
+ sg->length, DMA_FROM_DEVICE);
+ cp_len = (sg->length >= len) ? len : sg->length;
+ len -= cp_len;
+ crypto4xx_memcpy_le((u32 *) (phys_to_virt(addr)
+ + offset),
+ (u8 *) sr->save_digest, cp_len);
+ }
+		i++;
+		sg = &dst[i];
+ }
+}
+
static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -637,6 +948,11 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
dst->offset, dst->length, DMA_FROM_DEVICE);
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+	if (pd->pd_ctl.bf.status & 0xff) {
+		printk(KERN_ERR "ablkcipher returned error status = 0x%08x\n",
+			pd->pd_ctl.bf.status & 0xff);
+	}
if (ablk_req->base.complete != NULL)
ablk_req->base.complete(&ablk_req->base, 0);
@@ -644,7 +960,8 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
}
static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
{
struct crypto4xx_ctx *ctx;
struct ahash_request *ahash_req;
@@ -656,24 +973,101 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
/* call user provided callback function x */
+	if (pd->pd_ctl.bf.status & 0xff) {
+		printk(KERN_ERR "ahash returned error status = 0x%08x\n",
+			pd->pd_ctl.bf.status & 0xff);
+	}
if (ahash_req->base.complete != NULL)
ahash_req->base.complete(&ahash_req->base, 0);
return 0;
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static u32 crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct aead_request *aead_req;
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *dst;
+ dma_addr_t addr;
+ struct crypto_aead *aead;
+
+ aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
+ aead = crypto_aead_reqtfm(aead_req);
+ ctx = crypto_tfm_ctx(aead_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ aead_req->dst);
+ } else {
+ dst = pd_uinfo->dest_va;
+ addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+ dst->offset,
+ dst->length, DMA_FROM_DEVICE);
+ }
+
+ if (ctx->append_icv != 0) {
+ dst = pd_uinfo->dest_va;
+ crypto4xx_append_icv_to_end(dev, dst,
+ (struct sa_state_record *)
+ pd_uinfo->sr_va,
+ aead_req->cryptlen,
+ crypto_aead_authsize(aead));
+ }
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ /* call user provided callback function x */
+
+	if (pd->pd_ctl.bf.status & 0xff) {
+		if (pd->pd_ctl.bf.status & 1)
+			printk(KERN_ERR "authentication error\n");
+		if (pd->pd_ctl.bf.status & 2)
+			printk(KERN_ERR "pad fail error\n");
+		if (pd->pd_ctl.bf.status & 4)
+			printk(KERN_ERR "seqnum fail\n");
+		if (pd->pd_ctl.bf.status & 8)
+			printk(KERN_ERR "error notify\n");
+		printk(KERN_ERR "aead returned error status = 0x%08x\n",
+			pd->pd_ctl.bf.status & 0xff);
+		printk(KERN_ERR "pd pad_ctl = 0x%08x\n",
+			pd->pd_ctl.bf.pd_pad_ctl);
+	}
+
+ if (aead_req->base.complete != NULL)
+ aead_req->base.complete(&aead_req->base, 0);
+ return 0;
+}
+
+u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
struct ce_pd *pd;
struct pd_uinfo *pd_uinfo;
pd = dev->pdr + sizeof(struct ce_pd)*idx;
pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
+
if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AEAD)
+ return crypto4xx_aead_done(dev, pd_uinfo, pd);
+ else if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
CRYPTO_ALG_TYPE_ABLKCIPHER)
return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ else if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AHASH)
+ return crypto4xx_ahash_done(dev, pd_uinfo, pd);
+
+ return 0;
}
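+
+/*
+ * A minimal sketch (not part of this patch) of a bottom-half consumer
+ * built on crypto4xx_pd_done(); the pdr_head/pdr_tail ring indices are
+ * assumed bookkeeping fields, and PD_ENTRY_INUSE is the state set when
+ * a descriptor is handed to the engine in crypto4xx_build_pd() below.
+ */
+static void crypto4xx_drain_pdr(struct crypto4xx_device *dev)
+{
+	struct pd_uinfo *pd_uinfo;
+
+	while (dev->pdr_tail != dev->pdr_head) {	/* assumed ring indices */
+		pd_uinfo = dev->pdr_uinfo +
+			sizeof(struct pd_uinfo) * dev->pdr_tail;
+		if (pd_uinfo->state != PD_ENTRY_INUSE)
+			break;	/* nothing outstanding at the tail */
+		/* a real consumer would also test the engine's done bit */
+		crypto4xx_pd_done(dev, dev->pdr_tail);
+		dev->pdr_tail = (dev->pdr_tail + 1) % PPC4XX_NUM_PD;
+	}
+}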
/**
@@ -769,18 +1163,23 @@ static u32 get_next_sd(u32 current)
else
return 0;
}
-
+extern int ssl_arc4;
+extern int tls;
u32 crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int datalen,
+ struct scatterlist *assoc,
+ u32 aad_len,
void *iv, u32 iv_len)
{
struct crypto4xx_device *dev = ctx->dev;
dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
struct scatterlist *sg;
+ struct scatterlist *aad;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -790,13 +1189,19 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
unsigned long flags;
struct pd_uinfo *pd_uinfo = NULL;
unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
u32 gd_idx = 0;
+ unsigned int aadlen = 0;
+ datalen_check = datalen;
/* figure how many gd is needed */
- num_gd = get_sg_count(src, datalen);
- if (num_gd == 1)
- num_gd = 0;
+ if (aad_len) {
+ num_gd = get_sg_count(assoc, aad_len) +
+ get_sg_count(src, datalen);
+ } else {
+ num_gd = get_sg_count(src, datalen);
+ if (num_gd == 1)
+ num_gd = 0;
+ }
/* figure how many sd is needed */
if (sg_is_last(dst) || ctx->is_hash) {
@@ -852,8 +1257,8 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
+ writel(0, ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
pd->sa = pd_uinfo->sa_pa;
sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
if (ctx->direction == DIR_INBOUND)
@@ -864,17 +1269,48 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
memcpy((void *) sa + ctx->offset_to_sr_ptr,
&pd_uinfo->sr_pa, 4);
- if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+ if (iv_len) {
+ if (ctx->ctr_aes) {
+ /* First the nonce */
+ memcpy(pd_uinfo->sr_va, ctx->state_record,
+ CTR_RFC3686_NONCE_SIZE);
+ /* Copy the IV that is passed through
+ * each operation
+ */
+ crypto4xx_memcpy_le(pd_uinfo->sr_va +
+ CTR_RFC3686_NONCE_SIZE, iv, iv_len);
+ } else
+ crypto4xx_memcpy_le(pd_uinfo->sr_va,
+ iv, iv_len);
+ }
+ if (ctx->is_gcm || ctx->ctr_aes) {
+ u32 seq = 1;
+ /*For GCM and CTR(AES) algs adding the counter value*/
+ crypto4xx_memcpy_le(pd_uinfo->sr_va + 12,
+ (void *)&seq, 4);
+ }
} else {
if (ctx->direction == DIR_INBOUND) {
+#ifdef CONFIG_SEC_SA_OCM
+ writel((ctx->sa_in_ocm_addr >> 32),
+ ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
+#endif
pd->sa = ctx->sa_in_dma_addr;
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- } else {
+ } else {
+#ifdef CONFIG_SEC_SA_OCM
+ writel((ctx->sa_out_ocm_addr >> 32),
+ ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
+#endif
pd->sa = ctx->sa_out_dma_addr;
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
}
}
+
pd->sa_len = ctx->sa_len;
if (num_gd) {
/* get first gd we are going to use */
@@ -886,6 +1322,35 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* enable gather */
sa->sa_command_0.bf.gather = 1;
idx = 0;
+ if (aad_len) {
+ aadlen = aad_len;
+ aad = assoc;
+ /* walk the sg, and setup gather array for aad*/
+ while (aadlen) {
+ sg = &aad[idx];
+ addr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset,
+ sg->length, DMA_TO_DEVICE);
+
+ gd->ptr = addr;
+			gd->ctl_len.len = (sg->length >= aadlen) ?
+						aadlen : sg->length;
+ gd->ctl_len.done = 0;
+ gd->ctl_len.ready = 1;
+
+ if (sg->length >= aadlen)
+ break;
+
+ aadlen -= sg->length;
+
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ idx++;
+ }
+ /* prepare gd for src */
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ }
+ idx = 0;
src = &src[0];
/* walk the sg, and setup gather array */
while (nbytes) {
@@ -970,13 +1435,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = (aad_len >> 2);
pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
+ pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) |
+ (datalen + aad_len);
+ if (ctx->next_hdr)
+ pd->pd_ctl.bf.next_hdr = ctx->next_hdr;
pd_uinfo->state = PD_ENTRY_INUSE;
wmb();
/* write any value to push engine to read a pd */
- writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ if (dev->core_dev->revb_ver == 1) {
+#ifndef CONFIG_SEC_HW_POLL
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+#endif
+ } else
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+
+ dev->pkt_cnt++;
return -EINPROGRESS;
}
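+
+/*
+ * The tail of crypto4xx_build_pd() above encodes the descriptor "kick"
+ * policy: Rev B parts built with CONFIG_SEC_HW_POLL let the engine poll
+ * the PDR by itself, so no doorbell write is needed. A minimal sketch
+ * of that policy as a helper (not part of this patch):
+ */
+static inline void crypto4xx_kick_engine(struct crypto4xx_device *dev)
+{
+#ifdef CONFIG_SEC_HW_POLL
+	if (dev->core_dev->revb_ver == 1)
+		return;	/* hardware polling fetches the PD on its own */
+#endif
+	/* write any value to push the engine to read a pd */
+	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+}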
@@ -995,6 +1471,8 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_in_dma_addr = 0;
ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
+ ctx->is_gcm = 0;
+ ctx->append_icv = 0;
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
@@ -1004,6 +1482,9 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ tfm->crt_aead.reqsize = sizeof(struct crypto4xx_ctx);
+ break;
}
return 0;
@@ -1015,6 +1496,7 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
crypto4xx_free_sa(ctx);
crypto4xx_free_state_record(ctx);
+ crypto4xx_free_arc4_state_record(ctx);
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
@@ -1098,6 +1580,14 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
}
}
}
+#define SRAM_OCM_ADDR_ERR 0x0B4
+#define SRAM_OCM_STATUS0 0x0B5
+#define SRAM_OCM_STATUS1 0x0B6
+
+#define PLBA0_ESRL 0x0082
+#define PLBA0_ESRH 0x0083
+#define PLBA0_EARL 0x0084
+#define PLBA0_EARH 0x0085
/**
* Top Half of isr.
@@ -1106,33 +1596,140 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
if (core_dev->dev->ce_base == 0)
return 0;
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+
+ /* For RevB, 460EX and 460ExR Rev B */
+ if (core_dev->revb_ver == 1) {
+ writel(PPC4XX_INTERRUPT_CLR_REVB,
+ core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ } else {
+ writel(PPC4XX_INTERRUPT_CLR,
+ core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ }
+
+ core_dev->irq_cnt++;
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+
/**
* Supported Crypto Algorithms
*/
struct crypto4xx_alg_common crypto4xx_alg[] = {
+	/* Crypto DES ECB and CBC modes */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "ppc4xx-cbc-des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_3des_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ppc4xx-ecb-des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = crypto4xx_setkey_3des_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+
+	/* Crypto 3DES ECB and CBC modes */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "ppc4xx-cbc-3des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_3des_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ppc4xx-ecb-3des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = crypto4xx_setkey_3des_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
- .cra_module = THIS_MODULE,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1144,8 +1741,2184 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
}
}
}},
-};
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ppc4xx-ofb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "ppc4xx-cfb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+	/* Crypto AES ECB and CTR modes */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ppc4xx-ecb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "ppc4xx-ctr-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = CTR_RFC3686_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_ctr,
+ .encrypt = crypto4xx_encrypt_ctr,
+ .decrypt = crypto4xx_decrypt_ctr,
+ }
+ }
+ }},
+
+ /* AEAD Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "ppc4xx-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .maxauthsize = 16,
+ .ivsize = 12,
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .givencrypt = crypto4xx_givencrypt_aes_gcm,
+ .givdecrypt = crypto4xx_givdecrypt_aes_gcm,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ppc4xx-ccm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .givencrypt = crypto4xx_givencrypt_aes_ccm,
+ .givdecrypt = crypto4xx_givdecrypt_aes_ccm,
+ }
+ }
+ }},
+
+ /* Hash MD5 */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SA_HASH_ALG_MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "ppc4xx-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_md5_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ /* Hash MD5-HMAC */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_md5_hmac_setkey,
+ .halg.digestsize = SA_HASH_ALG_MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "ppc4xx-hmac-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ /* Hash SHA1, SHA2 and HMAC */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "ppc4xx-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha1_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha1_hmac_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "ppc4xx-hmac-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "ppc4xx-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "ppc4xx-hmac-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "ppc4xx-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "ppc4xx-hmac-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "ppc4xx-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "ppc4xx-hmac-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "ppc4xx-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "ppc4xx-hmac-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+	/* Hash XCBC and Kasumi F9 */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_xcbc_setkey,
+ .halg.digestsize = 16,
+ .halg.base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "ppc4xx-xcbc-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ /* Crypto Kasumi and Kasumi F8 */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "kasumi",
+ .cra_driver_name = "ppc4xx-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = KASUMI_KEY_SIZE,
+ .max_keysize = KASUMI_KEY_SIZE,
+ .ivsize = KASUMI_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_kasumi_p,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "f8(kasumi)",
+ .cra_driver_name = "ppc4xx-f8-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = KASUMI_KEY_SIZE,
+ .max_keysize = KASUMI_KEY_SIZE,
+ .ivsize = KASUMI_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_kasumi_f8,
+ .encrypt = crypto4xx_encrypt_kasumi_f8,
+ .decrypt = crypto4xx_decrypt_kasumi_f8,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_kasumi_f9_digest,
+ .setkey = crypto4xx_kasumi_f9_setkey,
+ .halg.digestsize = 8,
+ .halg.base = {
+ .cra_name = "f9(kasumi)",
+ .cra_driver_name = "ppc4xx-f9-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ /* Crypto ARC4 - stateless */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ppc4xx-arc4",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 16,
+ .setkey = crypto4xx_setkey_arc4,
+ .encrypt = crypto4xx_arc4_encrypt,
+ .decrypt = crypto4xx_arc4_decrypt,
+ }
+ }
+ }},
+	/* Crypto ARC4 - stateful */
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+		.cra_name = "cbc(arc4)",
+		.cra_driver_name = "ppc4xx-cbc-arc4",
+		.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 16,
+ .setkey = crypto4xx_setkey_arc4,
+ .encrypt = crypto4xx_arc4_encrypt,
+ .decrypt = crypto4xx_arc4_decrypt,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+		.cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+			.ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+
+ /** IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+#endif
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8, /* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(rfc4106(gcm(aes))))",
+ .cra_driver_name = "transport-esp-rfc4106-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size for crypto */
+ .maxauthsize = 12, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_rfc4106_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "transport-esp-rfc4104-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC-100,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {.aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 16, /* GCM ICV is at most 16 bytes */
+ .setkey = crypto4xx_setkey_transport_esp_rfc4106_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }}},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "macsec(gcm)",
+ .cra_driver_name = "macsec-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_macsec_gcm,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_macsec,
+ .decrypt = crypto4xx_decrypt_macsec,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(aes-sha1)",
+ .cra_driver_name = "dtls-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(des-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(des3-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(NULL-md5)",
+ .cra_driver_name = "ppc4xx-dtls-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(aes-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0xF,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_aes,
+ .decrypt = crypto4xx_decrypt_ssl_aes,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(des-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(des3-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(arc4-md5)",
+ .cra_driver_name = "ppc4xx-ssl-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(NULL-md5)",
+ .cra_driver_name = "ppc4xx-ssl-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(aes-sha1)",
+ .cra_driver_name = "ppc4xx-tls-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_aes,
+ .decrypt = crypto4xx_decrypt_ssl_aes,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(des-sha1)",
+ .cra_driver_name = "ppc4xx-tls-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(des3-sha1)",
+ .cra_driver_name = "ppc4xx-tls-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-tls-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(arc4-md5)",
+ .cra_driver_name = "ppc4xx-tls-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(NULL-md5)",
+ .cra_driver_name = "ppc4xx-tls-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-tls-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(aes-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(des-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(des3-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(arc4-md5)",
+ .cra_driver_name = "ppc4xx-tls1.1-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(NULL-md5)",
+ .cra_driver_name = "ppc4xx-tls1.1-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64-byte block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 0, /* no IV */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+};
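+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a kernel
+ * consumer binds one of the combined IPSec transforms above through the
+ * standard AEAD API, e.g.
+ *
+ *	struct crypto_aead *tfm = crypto_alloc_aead(
+ *		"tunnel(esp(authenc(hmac(sha1),cbc(des3_ede))))", 0, 0);
+ *
+ * The hardware implementation ("tunnel-esp-cbc-3des-sha1") is then
+ * preferred over any software fallback because it registers with the
+ * higher CRYPTO4XX_CRYPTO_PRIORITY_IPSEC priority.
+ */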
+#define CRYPTO4XX_CONSOLE_DRIVER_NAME "crypto4xx"
/**
* Module Initialization Routine
*/
@@ -1156,6 +3929,9 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ unsigned int pvr;
+ unsigned short min;
+ int revb_ver = 1; /* assume RevB crypto core until the PVR check below */
rc = of_address_to_resource(ofdev->node, 0, &res);
if (rc)
@@ -1172,6 +3948,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ revb_ver = 0;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1193,8 +3970,23 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
if (!core_dev->dev)
goto err_alloc_dev;
+ /* Older revisions of 460EX/GT do not support h/w security interrupt coalescing */
+ pvr = mfspr(SPRN_PVR);
+ printk("Reading pvr value = %x\n", pvr);
+ if ((pvr & 0xfffffff0) == 0x130218A0) {
+ min = PVR_MIN(pvr);
+ if (min < 4) {
+ printk(KERN_INFO "RevA 460EX/GT ... h/w bug in security intr coal\n");
+ revb_ver = 0;
+ } else {
+ printk(KERN_INFO "RevB h/w security interrupt coalescing supported ...\n");
+ }
+ }
+
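+ /*
+ * Record the result on the core device; code that arms h/w
+ * interrupt coalescing (e.g. the interrupt path) is expected to
+ * check revb_ver first, falling back on RevA parts.
+ */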
+ core_dev->revb_ver = revb_ver;
core_dev->dev->core_dev = core_dev;
core_dev->device = dev;
+ core_dev->irq_cnt = 0ll;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
rc = crypto4xx_build_pdr(core_dev->dev);
@@ -1209,6 +4001,25 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
if (rc)
goto err_build_sdr;
+ proc_crypto4xx = proc_mkdir("driver/crypto4xx", NULL);
+ if (proc_crypto4xx == NULL) {
+ printk(KERN_ERR
+ "%s: error creating proc entry\n", __func__);
+ rc = -ENOMEM;
+ goto err_build_sdr;
+ }
+
+ entry = create_proc_read_entry("crypto4xx",
+ 0,
+ proc_crypto4xx,
+ crypto4xx_device_read_procmem,
+ core_dev);
+ if(entry == NULL) {
+ printk(KERN_CRIT
+ "%s: crypto4xx: create_proc_read_entry failed!\n",
+ __FUNCTION__);
+ return -ENOMEM;
+ }
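+ /*
+ * The counters exposed by crypto4xx_device_read_procmem() are now
+ * readable from /proc/driver/crypto4xx/crypto4xx.
+ */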
+
/* Init tasklet for bottom half processing */
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
@@ -1216,7 +4027,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
+ "CRYPTO", dev);
if (rc)
goto err_request_irq;
@@ -1269,8 +4080,12 @@ static int __exit crypto4xx_remove(struct of_device *ofdev)
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
/* Free all allocated memory */
- crypto4xx_stop_all(core_dev);
+ remove_proc_entry("crypto4xx", entry) ;
+ entry = NULL;
+ remove_proc_entry("driver/crypto4xx", proc_crypto4xx);
+ proc_crypto4xx = NULL;
+ crypto4xx_stop_all(core_dev);
return 0;
}
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index da9cbe3b9fc..658a2416294 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -32,10 +32,16 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
-#define PPC4XX_LAST_PD 63
-#define PPC4XX_NUM_PD 64
+
+#define PPC4XX_LAST_PD 511
+#define PPC4XX_NUM_PD 512
+
#define PPC4XX_LAST_GD 1023
#define PPC4XX_NUM_GD 1024
+
#define PPC4XX_LAST_SD 63
#define PPC4XX_NUM_SD 64
#define PPC4XX_SD_BUFFER_SIZE 2048
@@ -76,7 +82,9 @@ struct crypto4xx_device {
void *pdr; /* base address of packet
descriptor ring */
dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
+ program ce pdr_base_register */
+ phys_addr_t pdr_ocm_addr;
+
void *gdr; /* gather descriptor ring */
dma_addr_t gdr_pa; /* physical address used to
program ce gdr_base_register */
@@ -100,6 +108,9 @@ struct crypto4xx_device {
void *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
+ u32 pkt_cnt;
+ u32 macsec_decrypt_num;
+
};
struct crypto4xx_core_device {
@@ -108,18 +119,31 @@ struct crypto4xx_core_device {
struct crypto4xx_device *dev;
u32 int_status;
u32 irq;
+ u64 irq_cnt;
struct tasklet_struct tasklet;
spinlock_t lock;
+ struct timer_list crypto4xx_timer;
+ int revb_ver;
};
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
void *sa_in;
dma_addr_t sa_in_dma_addr;
+ phys_addr_t sa_in_ocm_addr;
+
void *sa_out;
dma_addr_t sa_out_dma_addr;
+ phys_addr_t sa_out_ocm_addr;
+
+ void *arc4_state_record;
+ dma_addr_t arc4_state_record_dma_addr;
+ phys_addr_t arc4_state_ocm_addr;
+
void *state_record;
dma_addr_t state_record_dma_addr;
+ phys_addr_t state_record_ocm_addr;
+
u32 sa_len;
u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
u32 direction;
@@ -127,9 +151,20 @@ struct crypto4xx_ctx {
u32 save_iv;
u32 pd_ctl_len;
u32 pd_ctl;
+ u32 append_icv;
+ u32 is_gcm;
+ u32 ctr_aes;
u32 bypass;
+ u32 init_arc4;
u32 is_hash;
u32 hash_final;
+ u32 spi; /* ESP Security Parameter Index */
+ u32 seq; /* ESP sequence number */
+ u32 pad_block_size;
+ u32 encap_uhl;
+ u32 pad_ctl;
+ u32 authenc;
+ u32 hc_offset;
};
struct crypto4xx_req_ctx {
@@ -166,6 +201,7 @@ static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
return container_of(x, struct crypto4xx_alg, alg.u.cipher);
}
+extern void my_dump_Data(const u_char* dptr, u_int size);
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
@@ -173,9 +209,15 @@ extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_arc4_state_record(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_free_arc4_state_record(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_arc4_state_ptr(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_seq_num(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_spi(struct crypto4xx_ctx *ctx);
extern void crypto4xx_memcpy_le(unsigned int *dst,
const unsigned char *buf, int len);
extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
@@ -183,9 +225,15 @@ extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int datalen,
+ struct scatterlist *assoc,
+ u32 aad_len,
void *iv, u32 iv_len);
extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
+extern int crypto4xx_setkey_3des_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+extern int crypto4xx_setkey_3des_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
extern int crypto4xx_encrypt(struct ablkcipher_request *req);
extern int crypto4xx_decrypt(struct ablkcipher_request *req);
extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
@@ -193,4 +241,315 @@ extern int crypto4xx_hash_digest(struct ahash_request *req);
extern int crypto4xx_hash_final(struct ahash_request *req);
extern int crypto4xx_hash_update(struct ahash_request *req);
extern int crypto4xx_hash_init(struct ahash_request *req);
+extern int crypto4xx_md5_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_hash_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm,
+ unsigned int max_keylen);
+extern int crypto4xx_md5_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen);
+extern int crypto4xx_sha2_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_sha2_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen);
+extern int crypto4xx_sha1_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen);
+extern u32 get_dynamic_sa_offset_inner_digest(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_outer_digest(struct crypto4xx_ctx *ctx);
+extern int crypto4xx_pre_compute_hmac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha,
+ unsigned char digs);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ctr(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_encrypt_ctr(struct ablkcipher_request *req);
+int crypto4xx_decrypt_ctr(struct ablkcipher_request *req);
+int crypto4xx_setauthsize_aes(struct crypto_aead *cipher,
+ unsigned int authsize);
+int crypto4xx_givencrypt_aes_ccm(struct aead_givcrypt_request *req);
+int crypto4xx_givencrypt_aes_gcm(struct aead_givcrypt_request *req);
+int crypto4xx_givdecrypt_aes_ccm(struct aead_givcrypt_request *req);
+int crypto4xx_givdecrypt_aes_gcm(struct aead_givcrypt_request *req);
+int crypto4xx_setkey_kasumi_f8(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_encrypt_kasumi_f8(struct ablkcipher_request *req);
+int crypto4xx_decrypt_kasumi_f8(struct ablkcipher_request *req);
+int crypto4xx_setkey_kasumi_p(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_kasumi_f9_digest(struct ahash_request *req);
+int crypto4xx_kasumi_f9_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_xcbc_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_arc4(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_arc4_decrypt(struct ablkcipher_request *req);
+int crypto4xx_arc4_encrypt(struct ablkcipher_request *req);
+int crypto4xx_setauthsize_aes_ccm(struct crypto_aead *cipher,
+ unsigned int authsize);
+
+/* From crypto/md5.c */
+extern void md5_get_immediate_hash(struct crypto_tfm *tfm, u8 *data);
+extern unsigned int crypto4xx_sa_hash_tbl[3][6];
+
+/** IPSec Generic Tunnel Related Routine Declarations */
+int crypto4xx_encrypt_esp_cbc(struct aead_request *req);
+int crypto4xx_decrypt_esp_cbc(struct aead_request *req);
+int crypto4xx_givencrypt_esp_cbc(struct aead_givcrypt_request *req);
+
+/** IPSec Tunnel AES Routine Declarations */
+int crypto4xx_setkey_tunnel_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/** DES Tunnel Mode ipsec Related Algorithms */
+int crypto4xx_setkey_tunnel_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/** 3DES Tunnel Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_tunnel_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** Generic Transport Mode IPSEC Related Algorithms */
+int crypto4xx_encrypt_transport_esp_cbc(struct aead_request *req);
+int crypto4xx_givencrypt_transport_esp_cbc(struct aead_givcrypt_request *req);
+int crypto4xx_decrypt_transport_esp_cbc(struct aead_request *req);
+
+/** AES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** 3DES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/**Macsec Related Declarations */
+int crypto4xx_encrypt_macsec(struct aead_request *req);
+int crypto4xx_decrypt_macsec(struct aead_request *req);
+int crypto4xx_setkey_macsec_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DTLS/SSL/TLS Related Setkey Algorithms */
+int crypto4xx_setkey_dtls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DTLS/SSL/TLS Related Encrypt/Decrypt Algorithms */
+int crypto4xx_encrypt_dtls(struct aead_request *req);
+int crypto4xx_decrypt_dtls(struct aead_request *req);
+int crypto4xx_encrypt_ssl_aes(struct aead_request *req);
+int crypto4xx_decrypt_ssl_aes(struct aead_request *req);
+int crypto4xx_encrypt_ssl_des(struct aead_request *req);
+int crypto4xx_decrypt_ssl_des(struct aead_request *req);
+int crypto4xx_encrypt_ssl_arc4(struct aead_request *req);
+int crypto4xx_decrypt_ssl_arc4(struct aead_request *req);
+int crypto4xx_encrypt_ssl_null(struct aead_request *req);
+int crypto4xx_decrypt_ssl_null(struct aead_request *req);
+int crypto4xx_setkey_transport_esp_rfc4106_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 7d4edb00261..d964f5d5551 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -54,6 +54,10 @@
#define CRYPTO4XX_SEQ_RD 0x00000408
#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C
+#define CRYPTO4XX_SPI 0x000106B0
+#define CRYPTO4XX_SEQ_NUM0 0x000106B4
+#define CRYPTO4XX_SEQ_NUM1 0x000106B8
+
#define CRYPTO4XX_SA_CMD_0 0x00010600
#define CRYPTO4XX_SA_CMD_1 0x00010604
@@ -112,6 +116,7 @@
#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
+
/**
* Initialize CRYPTO ENGINE registers and memory bases.
*/
@@ -121,18 +126,21 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
+#define PPC4XX_INTERRUPT_CLR_REVB 0x7ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
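+/*
+ * Note: the _REVB variants extend interrupt handling for Rev. B parts;
+ * the wider clear mask 0x7ffff is the original 0x3ffff plus the new
+ * PPC4XX_TMO_ERR_INT bit (0x40000).
+ */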
/**
* all of the following defines are ad hoc
*/
-#define PPC4XX_RING_RETRY 100
-#define PPC4XX_RING_POLL 100
+#define PPC4XX_RING_RETRY 1
+#define PPC4XX_RING_POLL 1
#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
index 466fd94cd4a..fa4ff7ac66a 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -84,6 +84,119 @@ u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
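+/*
+ * The bit-fields of union dynamic_sa_contents record the size, in 32-bit
+ * words, of each optional SA field.  The offset helpers below locate a
+ * field by summing the word counts of everything that precedes it:
+ *
+ *    offset = sizeof(struct dynamic_sa_ctl)
+ *             + (words in all preceding fields) * 4
+ */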
+u32 get_dynamic_sa_offset_arc4_state_ptr(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1
+ + cts.bf.seq_num_mask0
+ + cts.bf.seq_num_mask1
+ + cts.bf.seq_num_mask2
+ + cts.bf.seq_num_mask3
+ + cts.bf.iv0
+ + cts.bf.iv1
+ + cts.bf.iv2
+ + cts.bf.iv3
+ + cts.bf.state_ptr
+ + cts.bf.arc4_ij_ptr;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_inner_digest(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+ offset = cts.bf.key_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_outer_digest(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_spi(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_seq_num(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi;
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_seq_num_mask(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
{
union dynamic_sa_contents cts;
@@ -92,6 +205,7 @@ u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
else
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+
return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 4b83ed7e557..5350c4efbc5 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -50,12 +50,42 @@ union dynamic_sa_contents {
u32 w;
} __attribute__((packed));
+#define SA_OPCODE_ESP 0
+#define SA_OPCODE_AH 1
+#define SA_OPCODE_SSL 4
+#define SA_OPCODE_TLS 5
+#define SA_OPCODE_SRTP 7
+#define SA_OPCODE_DTLS 1
+#define SA_OPCODE_TLS1_1 6
+
+#define SA_OP_GROUP_BASIC 0
+#define SA_OP_GROUP_PROTOCOL 1
+#define SA_OP_GROUP_EXTEND_PROTOCOL 3
+
+#define SA_OPCODE_EXT_PROT_DTLS 1
+#define SA_OPCODE_EXT_PROT_MACSEC 2
+#define SA_OPCODE_EXT_PROT_SSL 4
+#define SA_OPCODE_EXT_PROT_TLS10 5
+#define SA_OPCODE_EXT_PROT_TLS11 6
+
#define DIR_OUTBOUND 0
#define DIR_INBOUND 1
-#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
+#define SA_OPCODE_HASH_ENCRYPT 4
+#define SA_OPCODE_DECRYPT_HASH 4
+
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
#define SA_CIPHER_ALG_ARC4 2
@@ -65,8 +95,17 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_SHA224 2
+#define SA_HASH_ALG_SHA256 3
+#define SA_HASH_ALG_SHA384 4
+#define SA_HASH_ALG_SHA512 5
+#define HASH_ALG_MAX_CNT 6
+#define SA_HASH_ALG_AES_XCBC_MAC_128 8
+#define SA_HASH_ALG_KASUMI_f9 9
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_GMAC 13
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
-#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
#define SA_LOAD_HASH_FROM_SA 0
#define SA_LOAD_HASH_FROM_STATE 2
@@ -84,9 +123,22 @@ union dynamic_sa_contents {
#define SA_SAVE_HASH 1
#define SA_NOT_SAVE_IV 0
#define SA_SAVE_IV 1
+#define SA_GEN_IV 3
+
#define SA_HEADER_PROC 1
#define SA_NO_HEADER_PROC 0
+#define SA_HASH_ALG_MD5_DIGEST_SIZE 16
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
+#define SA_HASH_ALG_SHA224_DIGEST_SIZE 28
+#define SA_HASH_ALG_SHA256_DIGEST_SIZE 32
+#define SA_HASH_ALG_SHA384_DIGEST_SIZE 48
+#define SA_HASH_ALG_SHA512_DIGEST_SIZE 64
+
+#define CRYPTO4XX_CRYPTO_PRIORITY_IPSEC 300
+
+#define CRYPTO4XX_MAC_ALGS { "md5", "sha1", \
+ "sha224", "sha256", "sha384", "sha512" }
union sa_command_0 {
struct {
u32 scatter:1;
@@ -111,7 +163,13 @@ union sa_command_0 {
} __attribute__((packed));
#define CRYPTO_MODE_ECB 0
+#define CRYPTO_MODE_KASUMI 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_AES_CTR 4
+#define CRYPTO_MODE_KASUMI_f8 4
+#define CRYPTO_MODE_AES_ICM 5
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
@@ -124,7 +182,7 @@ union sa_command_0 {
#define SA_AES_KEY_LEN_256 4
#define SA_REV2 1
-/**
+/*
* The following defines the bits of sa_command_1.
* In basic hash mode this bit selects simple hash or HMAC.
* In IPsec mode, this bit controls muting.
@@ -177,13 +235,46 @@ struct dynamic_sa_ctl {
/**
* State Record for Security Association (SA)
*/
-struct sa_state_record {
+struct sa_state_record {
u32 save_iv[4];
u32 save_hash_byte_cnt[2];
u32 save_digest[16];
} __attribute__((packed));
/**
+ * Arc4 State Record for Security Association (SA)
+ */
+struct arc4_sr {
+ u32 arc4_state[64];
+} __attribute__((packed));
+
+/**
+ * Security Association (SA) for DES
+ */
+struct dynamic_sa_des {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[2];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_DES_LEN (sizeof(struct dynamic_sa_des)/4)
+#define SA_DES_CONTENTS 0x26000022
+
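+/*
+ * Each SA_*_CONTENTS value below appears to be the literal
+ * dynamic_sa_contents word for its layout, with the bit-fields encoding
+ * the key, IV and digest word counts declared by the matching struct.
+ */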
+/**
+ * Security Association (SA) for 3DES
+ */
+struct dynamic_sa_3des {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 iv[2]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_3DES_LEN (sizeof(struct dynamic_sa_3des)/4)
+#define SA_3DES_CONTENTS 0x26000062
+
+/**
* Security Association (SA) for AES128
*
*/
@@ -194,11 +285,10 @@ struct dynamic_sa_aes128 {
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
-
#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4)
#define SA_AES128_CONTENTS 0x3e000042
-/*
+/**
* Security Association (SA) for AES192
*/
struct dynamic_sa_aes192 {
@@ -208,7 +298,6 @@ struct dynamic_sa_aes192 {
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
-
#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
#define SA_AES192_CONTENTS 0x3e000062
@@ -228,6 +317,19 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for HASH128: HMAC-MD5
+ */
+struct dynamic_sa_hash128 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH128_LEN (sizeof(struct dynamic_sa_hash128)/4)
+#define SA_HASH128_CONTENTS 0x20008402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
@@ -240,4 +342,418 @@ struct dynamic_sa_hash160 {
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+/**
+ * Security Association (SA) for HASH256: HMAC-SHA224, HMAC-SHA256
+ */
+struct dynamic_sa_hash256 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH256_LEN (sizeof(struct dynamic_sa_hash256)/4)
+#define SA_HASH256_CONTENTS 0x20010802
+
+/**
+ * Security Association (SA) for HASH512: HMAC-SHA512
+ */
+struct dynamic_sa_hash512 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[16];
+ u32 outer_digest[16];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH512_LEN (sizeof(struct dynamic_sa_hash512)/4)
+#define SA_HASH512_CONTENTS 0x20021002
+
+/**
+ * Security Association (SA) for AES128_XCBC_MAC
+ */
+struct dynamic_sa_aes128_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes128_xcbc_mac)/4)
+#define SA_AES128_XCBC_MAC_CONTENTS 0x3e010842
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e0c8442
+
+/**
+ * Security Association (SA) for AES192_XCBC_MAC
+ */
+struct dynamic_sa_aes192_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes192_xcbc_mac)/4)
+#define SA_AES192_XCBC_MAC_CONTENTS 0x3e010862
+
+/**
+ * Security Association (SA) for AES192_GCM
+ */
+struct dynamic_sa_aes192_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_GCM_LEN (sizeof(struct dynamic_sa_aes192_gcm)/4)
+#define SA_AES192_GCM_CONTENTS 0x3e0c8462
+
+
+/**
+ * Security Association (SA) for AES256_XCBC_MAC
+ */
+struct dynamic_sa_aes256_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes256_xcbc_mac)/4)
+#define SA_AES256_XCBC_MAC_CONTENTS 0x3e010882
+
+/**
+ * Security Association (SA) for AES256_GCM
+ */
+struct dynamic_sa_aes256_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_GCM_LEN (sizeof(struct dynamic_sa_aes256_gcm)/4)
+#define SA_AES256_GCM_CONTENTS 0x3e0c8482
+#define SA_AES_GCM_CONTENTS 0x3e0c8402
+
+/**
+ * Security Association (SA) for Kasumi
+ */
+struct dynamic_sa_kasumi {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_LEN (sizeof(struct dynamic_sa_kasumi)/4)
+#define SA_KASUMI_CONTENTS 0x20000042
+
+/**
+ * Security Association (SA) for Kasumi f8
+ */
+struct dynamic_sa_kasumi_f8 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_F8_LEN (sizeof(struct dynamic_sa_kasumi_f8)/4)
+#define SA_KASUMI_F8_CONTENTS 0x26000042
+
+#define KASUMI_BLOCK_SIZE 8
+#define KASUMI_KEY_SIZE 16
+
+/**
+ * Security Association (SA) for Kasumi f9
+ */
+struct dynamic_sa_kasumi_f9 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[4];
+ u32 outter_digest[3];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_F9_LEN (sizeof(struct dynamic_sa_kasumi_f9)/4)
+#define SA_KASUMI_F9_CONTENTS 0x20006402
+
+/**
+ * Security Association (SA) for AES256 CCM
+ */
+struct dynamic_sa_aes256_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_CCM_LEN (sizeof(struct dynamic_sa_aes256_ccm)/4)
+#define SA_AES256_CCM_CONTENTS 0x3e000082
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES192 CCM
+ */
+struct dynamic_sa_aes192_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_CCM_LEN (sizeof(struct dynamic_sa_aes192_ccm)/4)
+#define SA_AES192_CCM_CONTENTS 0x3e000062
+
+/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+
+/**
+ * Security Association (SA) for ARC4
+ */
+struct arc4_ij_ptr {
+ u32 rsv:16;
+ u32 j:8;
+ u32 i:8;
+} __attribute__((packed));
+
+struct dynamic_sa_arc4 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ struct arc4_ij_ptr ij;
+ u32 arc4_state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ARC4_LEN (sizeof(struct dynamic_sa_arc4)/4)
+#define SA_ARC4_CONTENTS 0xc0000042
+
+/**
+ * Security Association (SA) for IPsec ESP md5 or ESP sha1
+ */
+struct dynamic_sa_esp_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_MD5_SHA1_LEN (sizeof(struct dynamic_sa_esp_md5_sha1)/4)
+#define SA_ESP_MD5_SHA1_CONTENTS 0x3ffca582
+
+struct dynamic_sa_esp_des_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[2];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_DES_MD5_SHA1_LEN (sizeof(struct dynamic_sa_esp_des_md5_sha1)/4)
+#define SA_ESP_DES_MD5_SHA1_CONTENTS 0x3ffca522
+
+/**
+ * Security Association (SA) for IPsec ESP 3des md5 sha1
+ */
+struct dynamic_sa_esp_3des_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_3DES_MD5_SHA1_LEN (sizeof(struct dynamic_sa_esp_3des_md5_sha1)/4)
+#define SA_ESP_3DES_MD5_SHA1_CONTENTS 0x3ffca562
+
+/**
+ * Security Association (SA) for IPsec ESP sha512
+ */
+struct dynamic_sa_esp_sha512
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[16];
+ u32 outter_digest[16];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_SHA512_LEN (sizeof(struct dynamic_sa_esp_sha512)/4)
+#define SA_ESP_SHA512_CONTENTS 0x3ffe1082
+
+
+/**
+ * Security Association (SA) for IPsec ESP gcm
+ */
+struct dynamic_sa_esp_gcm
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 outter_digest[4];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_GCM_LEN (sizeof(struct dynamic_sa_esp_gcm)/4)
+#define SA_ESP_GCM_CONTENTS 0x3ffc8442
+/**
+ * Security Association (SA) for IPsec ESP aes sha256
+ */
+struct dynamic_sa_esp_sha256
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[8];
+ u32 outter_digest[8];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_SHA256_LEN (sizeof(struct dynamic_sa_esp_sha256)/4)
+#define SA_ESP_SHA256_CONTENTS 0x3ffd0882
+
+/**
+ * Security Association (SA) for MACsec GCM
+ */
+struct dynamic_sa_macsec_gcm
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 seq_mask[4];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_MACSEC_GCM_LEN (sizeof(struct dynamic_sa_macsec_gcm)/4)
+#define SA_MACSEC_GCM_CONTENTS 0x27ec0442
+
+/**
+ * Security Association (SA) for DTLS
+ */
+union dynamic_sa_dtls_spi
+{
+ struct {
+ u32 rsv:8;
+ u32 version:16;
+ u32 type:8;
+ } bf;
+ u32 w;
+} __attribute__((packed));
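+/*
+ * For DTLS/SSL/TLS the 32-bit SPI slot carries the record type and
+ * protocol version instead of an IPsec SPI (inferred from the field
+ * names above).
+ */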
+
+struct dynamic_sa_dtls
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ union dynamic_sa_dtls_spi spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_DTLS_LEN (sizeof(struct dynamic_sa_dtls)/4)
+#define SA_DTLS_CONTENTS 0x3ffca582
+
+/**
+ * Security Association (SA) for SSL/TLS ARC4
+ */
+struct dynamic_sa_ssl_tls_arc4
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ union dynamic_sa_dtls_spi spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ struct arc4_ij_ptr ij;
+ u32 arc4_state_ptr;
+} __attribute__((packed));
+
+#define SA_SSL_ARC4_LEN (sizeof(struct dynamic_sa_ssl_tls_arc4)/4)
+#define SA_SSL_ARC4_CONTENTS 0xfffca542
+
#endif
diff --git a/drivers/crypto/pka_4xx.c b/drivers/crypto/pka_4xx.c
new file mode 100644
index 00000000000..4dea3eb4b3c
--- /dev/null
+++ b/drivers/crypto/pka_4xx.c
@@ -0,0 +1,1333 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This file provides access to, and the implementation of, the
+ * high-level API to the AMCC 4xx PKA (Public Key Accelerator) registers.
+ *
+ * @file pka_4xx.c
+ *
+ *******************************************************************************
+ */
+#include <asm/delay.h>
+#include <asm/dcr-native.h>
+#include <linux/irq.h>
+#include "pka_4xx_access.h"
+#include <crypto/pka_4xx.h>
+#include "pka_4xx_firmware.h"
+
+/**
+ * PKA Functions
+ *
+ */
+/* Max number of times to poll for a synchronous operation to complete */
+#define PKA4XX_POLL_DONE_MAX_CNT 5000
+
+#define PKA4XX_CSR_WRITE_RETURN(a, v) \
+ do { \
+ rc = pka4xx_csr_hw_write32((a), (v)); \
+ if (rc != RC_OK) \
+ return rc; \
+ } while (0)
+
+#define PKA4XX_CSR_READ_RETURN(a, v) \
+ do { \
+ rc = pka4xx_csr_hw_read32((a), (v)); \
+ if (rc != RC_OK) \
+ return rc; \
+ } while (0)
+
+
+#define PKA_ALIGN(x, a) do { \
+ (x) += ((a)-1); \
+ (x) &= ~((a)-1); \
+ } while (0)
+#define PKA_ALIGN_RVAL(x, a) (((x) + ((a)-1)) & (~((a)-1)))
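+/*
+ * Both forms are the usual round-up-to-power-of-two idiom, e.g.
+ * PKA_ALIGN(addr, 8) turns addr = 0x1234 into (0x1234 + 7) & ~7 = 0x1238
+ * and leaves an already 8-byte-aligned address unchanged.
+ */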
+
+static u32 pka_firmware_sizedw = PKA_FIRMWARE_1_3_SIZEDW;
+static const u32 *pka_firmware = pka_firmware_1_3;
+
+/* Debug capture of recent expmod results (file-local) */
+static u32 msg_buf[20][10];
+static int msg_idx;
+
+u32 pka4xx_pkcp_set_vec(u32 vecA_cnt,
+ u32 *vecA,
+ u32 vecB_cnt,
+ u32 *vecB)
+{
+ u32 addr;
+ int rc, i;
+ u32 val32;
+
+ addr = PKA_RAM_ADDR;
+ /* Set PKA RAM address and load input A - multiplicand */
+ PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, vecA_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+ PKA4XX_CSR_READ_RETURN(PKA_APTR_ADDR, &val32);
+ PKA4XX_CSR_READ_RETURN(PKA_ALENGTH_ADDR, &val32);
+ for(i = 0; i < vecA_cnt; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, vecA[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr %08X val %08X", addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Align 8-byte but use 2 as it is DWORD */
+ /* Set PKA RAM address and load for input B - multiplier */
+ PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, vecB_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+ PKA4XX_CSR_READ_RETURN(PKA_BPTR_ADDR, &val32);
+ PKA4XX_CSR_READ_RETURN(PKA_BLENGTH_ADDR, &val32);
+ for(i = 0; i < vecB_cnt; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, vecB[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO,
+ "addr %08X val %08X", addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address for output C - product */
+ PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+
+ return addr;
+}
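+/*
+ * Resulting PKA RAM layout (schematic):
+ *
+ *    PKA_RAM_ADDR: | vecA | pad | vecB | pad | C (result) ...
+ *
+ * The APTR/BPTR/CPTR registers receive the word offsets (addr >> 2) of
+ * these regions; each region is padded up to an 8-byte boundary.
+ */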
+
+u32 pka4xx_addsub_set_vec(u32 input_cnt,
+ u32 *addendA,
+ u32 *subtrahend,
+ u32 *addendC)
+{
+ u32 addr;
+ int rc, i;
+
+ addr = PKA_RAM_ADDR;
+ /* Set PKA RAM address and load input A - addendA */
+ PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, input_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+ for(i = 0; i < input_cnt; i++, addr += 4)
+ PKA4XX_CSR_WRITE_RETURN(addr, addendA[i]);
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address and load input B - subtrahend */
+ PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+ for(i = 0; i < input_cnt; i++, addr += 4)
+ PKA4XX_CSR_WRITE_RETURN(addr, subtrahend[i]);
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address and load input C - addendC */
+ PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+ for(i = 0; i < input_cnt; i++, addr += 4)
+ PKA4XX_CSR_WRITE_RETURN(addr, addendC[i]);
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address for output - result */
+ PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+
+ return addr;
+}
+
+
+u32 pka4xx_shift_set_vec(u32 input_cnt,
+ u32 *input,
+ u8 shift)
+{
+ u32 addr;
+ int rc, i;
+
+ addr = PKA_RAM_ADDR;
+ /* Set PKA RAM address and load input A - input */
+ PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, input_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+ for(i = 0; i < input_cnt; i++, addr += 4)
+ PKA4XX_CSR_WRITE_RETURN(addr, input[i]);
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set shift value */
+ PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, shift);
+ /* Set PKA RAM address for output - result */
+ PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+ /* Save callback for asynchronous operation */
+
+ return addr;
+}
+
+u32 pka4xx_expmod_crt_set_vec(u32 exp_len,
+ u32 *expP,
+ u32 *expQ,
+ u32 mod_inverse_len,
+ u32 *modP,
+ u32 *modQ,
+ u32 *inverseQ,
+ u32 *input)
+{
+ u32 addr;
+ u32 oaddr_start = 0x00000000;
+ u32 Daddr;
+ int i, rc;
+ u32 val32;
+
+ addr = PKA_RAM_ADDR + (oaddr_start << 2);
+
+ PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, exp_len);
+ PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+ for(i = 0; i < exp_len; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, expP[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X expP val 0x%08X\n",
+ addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+ for(i = 0; i < exp_len; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, expQ[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X expQ val 0x%08X\n",
+ addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+ /* Set PKA RAM address and load input modP and modQ */
+ PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, mod_inverse_len);
+ PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+ for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, modP[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X modP val 0x%08X\n",
+ addr, val32);
+ }
+ addr += 8; /* Require 1 extra DWORD */
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+ for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, modQ[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X mod Q val 0x%08X\n",
+ addr, val32);
+ }
+ addr += 4; /* Require 1 extra DWORD */
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+ /* Set PKA RAM address and load input inverseQ */
+ PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+ for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, inverseQ[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X invQ val 0x%08X\n",
+ addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+ /* Set PKA RAM address for output - result */
+ PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+ Daddr = addr;
+ for(i = 0; i < (mod_inverse_len<<1); i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, input[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X input val 0x%08X\n",
+ addr, val32);
+ }
+
+ return Daddr;
+}
+
+u32 pka4xx_expmod_set_vec(u32 base_mod_cnt, u32 *base,
+ u32 *modulus,
+ u32 exponent_cnt,
+ u32 *exponent)
+{
+ u32 addr;
+ u32 oaddr_start = 0x00000000;
+ u32 val32;
+ int rc, i;
+
+ addr = PKA_RAM_ADDR + (oaddr_start << 2);
+
+ PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, exponent_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+ for(i = 0; i < exponent_cnt; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, exponent[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X A val 0x%08X",
+ addr, val32);
+ }
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address and load input B - modulus */
+ PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, base_mod_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+ for(i = 0; i < base_mod_cnt; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, modulus[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X B val 0x%08X",
+ addr, val32);
+ }
+ addr += 4; /* Require 1 extra DWORD */
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address and load input C - base */
+ PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+ for(i = 0; i < base_mod_cnt; i++, addr += 4) {
+ PKA4XX_CSR_WRITE_RETURN(addr, base[i]);
+ PKA4XX_CSR_READ_RETURN(addr, &val32);
+ LPRINTF(LL_INFO, "addr 0x%08X C val 0x%08X",
+ addr, val32);
+ }
+ addr += 4; /* Require 1 extra DWORD */
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Set PKA RAM address for output - result */
+ PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+
+ return addr;
+}
+
+void pka4xx_process_completed_event (struct pka4xx_op *op)
+{
+ int status = RC_OK;
+ pka4xx_cb callback = NULL;
+
+ callback = op->cb;
+ op->cb = NULL;
+ if (callback)
+ (*callback)(op->ctx, status);
+}
+
+void pka4xx_tasklet_cb (unsigned long data)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+
+ list_for_each_safe(pos, tmp, &pka_get_ctx()->completed_event_queue) {
+ struct pka4xx_op *item;
+ item = list_entry(pos, struct pka4xx_op, next);
+ list_del(pos);
+ spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+ pka4xx_process_completed_event(item);
+ spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+}
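+/*
+ * Completion path: the IRQ handler below copies results out of PKA RAM,
+ * moves the finished descriptor onto completed_event_queue and starts
+ * the next queued operation; the tasklet above then drains the queue and
+ * invokes each saved callback outside hard-IRQ context.
+ */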
+
+static u8 pka4xx_pending_op(void)
+{
+ return pka_get_ctx()->op_head != pka_get_ctx()->op_tail;
+}
+
+static struct pka4xx_op * pka4xx_get_op_item(void)
+{
+ u32 tail;
+
+ if (pka_get_ctx()->op_tail == PKA4XX_PENDING_OP_MAX-1)
+ tail = 0;
+ else
+ tail = pka_get_ctx()->op_tail + 1;
+
+ if (tail == pka_get_ctx()->op_head) {
+ printk(LL_ERR "No free descriptor available for operation "
+ "queuing\n");
+ return NULL;
+ }
+ return &pka_get_ctx()->op[pka_get_ctx()->op_tail];
+}
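+/*
+ * op[] is used as a ring buffer: head == tail means empty, and one slot
+ * is left unused so that (tail + 1) == head can signal full without a
+ * separate element counter.
+ */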
+
+static int pka4xx_start_op(struct pka4xx_op *op, int interrupt_mode)
+{
+ int rc;
+ u8 restart = 0;
+ u32 Daddr;
+ u32 Caddr, addr;
+ u32 val32;
+
+ if (!interrupt_mode) {
+ restart = !(pka_get_ctx()->op_head != pka_get_ctx()->op_tail);
+
+ if (pka_get_ctx()->op_tail == PKA4XX_PENDING_OP_MAX-1)
+ pka_get_ctx()->op_tail = 0;
+ else
+ pka_get_ctx()->op_tail++;
+ }
+
+ if (restart || interrupt_mode) {
+ switch(op->opcode) {
+ case 0: /* Canceled */
+ return RC_OK;
+ case PKA_FUNCTION_DIV:
+ /* Passing to pka4xx_div_set_vec the order of
+ * dividend_cnt, dividend, divisor_cnt, divisor
+ */
+ LPRINTF(LL_INFO, "Starting async Div PKA operation \n");
+ Caddr = pka4xx_pkcp_set_vec(op->async_pkcp.vecA_cnt,
+ op->async_pkcp.vecA,
+ op->async_pkcp.vecB_cnt,
+ op->async_pkcp.vecB);
+ op->ramC_addr = Caddr;
+ addr = Caddr;
+ addr += (op->async_pkcp.vecB_cnt + 1) * 4;
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Select PKA RAM address for output D - quotient */
+ PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+ PKA4XX_CSR_READ_RETURN(PKA_DPTR_ADDR, &val32);
+ op->ramD_addr = addr;
+ break;
+ case PKA_FUNCTION_MUL:
+ case PKA_FUNCTION_MOD:
+ case PKA_FUNCTION_ADD:
+ case PKA_FUNCTION_SUB:
+ case PKA_FUNCTION_COMPARE:
+ Caddr = pka4xx_pkcp_set_vec(op->async_pkcp.vecA_cnt,
+ op->async_pkcp.vecA,
+ op->async_pkcp.vecB_cnt,
+ op->async_pkcp.vecB);
+ op->ramC_addr = Caddr;
+ break;
+ case PKA_FUNCTION_ADDSUB:
+ LPRINTF(LL_INFO, "Starting async ADDSUB PKA operation\n");
+ Daddr = pka4xx_addsub_set_vec(op->async_pkcp.vecA_cnt,
+ op->async_pkcp.vecA,
+ op->async_pkcp.vecB,
+ op->async_pkcp.vec_addsub_C);
+ op->ramD_addr = Daddr;
+ break;
+ case PKA_FUNCTION_RSHIFT:
+ case PKA_FUNCTION_LSHIFT:
+ Caddr = pka4xx_shift_set_vec(op->async_pkcp.vecA_cnt,
+ op->async_pkcp.vecA,
+ op->async_pkcp.shift_val);
+ op->ramC_addr = Caddr;
+ break;
+ case PKA_FUNCTION_SEQOP_EXPMOD_ACT2:
+ case PKA_FUNCTION_SEQOP_EXPMOD_ACT4:
+ case PKA_FUNCTION_SEQOP_EXPMOD_VAR:
+ Daddr = pka4xx_expmod_set_vec
+ (op->async_expmod.base_mod_cnt,
+ op->async_expmod.base,
+ op->async_expmod.modulus,
+ op->async_expmod.exp_cnt,
+ op->async_expmod.exp);
+ op->ramD_addr = Daddr;
+ break;
+ case PKA_FUNCTION_SEQOP_EXPMOD_CRT:
+ /* No operation was pending before this one was queued,
+ * i.e. restart == 1
+ */
+ Daddr = pka4xx_expmod_crt_set_vec
+ (op->async_expmod_crt.exp_len,
+ op->async_expmod_crt.expP,
+ op->async_expmod_crt.expQ,
+ op->async_expmod_crt.mod_inverse_len,
+ op->async_expmod_crt.modP,
+ op->async_expmod_crt.modQ,
+ op->async_expmod_crt.inverseQ,
+ op->async_expmod_crt.input);
+ op->ramD_addr = Daddr;
+ break;
+ default:
+ printk(LL_ERR "No operation in async mode\n");
+ return RC_OK;
+ }
+ if (op->opcode == PKA_FUNCTION_SEQOP_EXPMOD_VAR ||
+ op->opcode == PKA_FUNCTION_SEQOP_EXPMOD_CRT) {
+ PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, op->resultC_cnt);
+ }
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_RUN | op->opcode);
+ }
+ return RC_OK;
+}
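+/*
+ * Note the deferred start above: from process context (interrupt_mode ==
+ * 0) the descriptor is only queued, and the hardware is programmed
+ * immediately only when the ring was empty (restart); otherwise the IRQ
+ * handler kicks off the next operation as the previous one completes.
+ */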
+
+irqreturn_t pka4xx_irq_handler(int irq, void * id)
+{
+ int rc;
+ u32 i;
+ u32 val;
+ struct pka4xx_op *op;
+ struct pka4xx_op *next_op;
+ unsigned long flags;
+
+ if (!pka4xx_pending_op()) {
+ LPRINTF(LL_INFO,
+ "No pending op in pka4xx_irq_handler !!\n");
+ return IRQ_NONE;
+ }
+ op = &pka_get_ctx()->op[pka_get_ctx()->op_head];
+ switch(op->opcode) {
+ case 0: /* Canceled */
+ op->cb = NULL;
+ break;
+ case PKA_FUNCTION_COMPARE:
+ PKA4XX_CSR_READ_RETURN(PKA_COMPARE_ADDR, &val);
+ if (val & PKA_COMPARE_EQUAL)
+ *op->resultC_addr = 0;
+ else if (val & PKA_COMPARE_LESSTHAN)
+ *op->resultC_addr = -1;
+ else
+ *op->resultC_addr = 1;
+ break;
+ case PKA_FUNCTION_SEQOP_EXPMOD_ACT2:
+ case PKA_FUNCTION_SEQOP_EXPMOD_ACT4:
+ case PKA_FUNCTION_SEQOP_EXPMOD_VAR:
+ case PKA_FUNCTION_SEQOP_EXPMOD_CRT:
+ for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4) {
+ pka4xx_csr_hw_read32(op->ramD_addr,
+ &op->resultD_addr[i]);
+ msg_buf[msg_idx][i] = op->resultD_addr[i];
+ LPRINTF(LL_INFO, "res expmod 0x%08x",
+ msg_buf[msg_idx][i]);
+ i++;
+ }
+ break;
+ case PKA_FUNCTION_ADDSUB:
+ for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4)
+ pka4xx_csr_hw_read32(op->ramD_addr,
+ &op->resultD_addr[i++]);
+ break;
+ case PKA_FUNCTION_DIV:
+ for(i = 0; i < op->resultC_cnt; op->ramC_addr += 4)
+ pka4xx_csr_hw_read32(op->ramC_addr,
+ &op->resultC_addr[i++]);
+ for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4)
+ pka4xx_csr_hw_read32(op->ramD_addr,
+ &op->resultD_addr[i++]);
+ break;
+ default:
+ for(i = 0; i < op->resultC_cnt; op->ramC_addr += 4)
+ pka4xx_csr_hw_read32(op->ramC_addr,
+ &op->resultC_addr[i++]);
+ break;
+ }
+
+ if (pka_get_ctx()->op_head == PKA4XX_PENDING_OP_MAX - 1)
+ pka_get_ctx()->op_head = 0;
+ else
+ pka_get_ctx()->op_head =
+ (pka_get_ctx()->op_head + 1) % PKA4XX_PENDING_OP_MAX;
+
+ next_op = &pka_get_ctx()->op[pka_get_ctx()->op_head];
+
+ spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+ list_add_tail(&op->next, &pka_get_ctx()->completed_event_queue);
+ spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+
+ if (!pka4xx_pending_op()) {
+ LPRINTF(LL_INFO, "No pending op in pka4xx_irq_handler\n");
+ tasklet_schedule(&pka_get_ctx()->tasklet);
+ return IRQ_HANDLED;
+ }
+ pka4xx_start_op(next_op, 1);
+ tasklet_schedule(&pka_get_ctx()->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int pka4xx_wait2complete(void)
+{
+ int rc;
+ u32 val;
+ u32 tried = 0;
+
+ do {
+ udelay(1);
+ PKA4XX_CSR_READ_RETURN(PKA_FUNCTION_ADDR, &val);
+ if (!(val & PKA_FUNCTION_RUN)) {
+ return RC_OK;
+ }
+ tried++;
+ } while (tried < PKA4XX_POLL_DONE_MAX_CNT);
+
+ LPRINTF(LL_INFO "Returning busy after tried count = %d", tried);
+ return RC_EBUSY;
+}
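+/*
+ * With udelay(1) per poll and PKA4XX_POLL_DONE_MAX_CNT = 5000, a
+ * synchronous caller busy-waits for at most roughly 5 ms (plus register
+ * read overhead) before the operation is reported busy.
+ */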
+
+int pka4xx_mul(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 multiplicand_cnt, u32 *multiplicand,
+ u32 multiplier_cnt, u32 *multiplier,
+ u32 *product)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (multiplicand_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ multiplier_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_pkcp_set_vec(multiplicand_cnt, multiplicand,
+ multiplier_cnt, multiplier);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_RUN | PKA_FUNCTION_MUL);
+ rc = pka4xx_wait2complete();
+
+ if (rc != RC_OK)
+ return rc;
+ multiplicand_cnt += multiplier_cnt;
+ for(i = 0; i < multiplicand_cnt; i++) {
+ PKA4XX_CSR_READ_RETURN(addr, &product[i]);
+ LPRINTF(LL_INFO, "result addr 0x%08x value 0x%08x",
+ addr, product[i]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_MUL;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = multiplicand_cnt+multiplier_cnt;
+ pka_op->resultC_addr = product;
+ pka_op->async_pkcp.vecA_cnt = multiplicand_cnt;
+ pka_op->async_pkcp.vecA = multiplicand;
+ pka_op->async_pkcp.vecB_cnt = multiplier_cnt;
+ pka_op->async_pkcp.vecB = multiplier;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+ return RC_EINPROGRESS;
+}
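+/*
+ * Usage sketch (illustrative only; the sample values and the
+ * least-significant-word-first ordering are assumptions): a blocking
+ * multiply of two one-word vectors.  With cb == NULL the call polls for
+ * completion and the product is valid once RC_OK is returned:
+ *
+ *    u32 a = 0x10000, b = 0x10, prod[2];
+ *
+ *    if (pka4xx_mul(NULL, NULL, NULL, 1, &a, 1, &b, prod) == RC_OK)
+ *        ... prod[] holds a * b = 0x00100000 in 2 result words ...
+ *
+ * Passing a callback instead queues the operation, returns
+ * RC_EINPROGRESS, and delivers the result via the callback from the
+ * completion tasklet.
+ */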
+
+int pka4xx_div(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 dividend_cnt, u32 *dividend,
+ u32 divisor_cnt, u32 *divisor,
+ u32 *remainder, u32 *quotient)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 resultC_addr;
+ u32 resultD_addr;
+ u32 i;
+ u32 val32;
+
+#ifdef PPR_PKA_DEBUG
+ if (dividend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ divisor_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ divisor_cnt > dividend_cnt)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ resultC_addr = pka4xx_pkcp_set_vec(dividend_cnt, dividend,
+ divisor_cnt, divisor);
+ addr = resultC_addr;
+ addr += (divisor_cnt + 1) * 4;
+ PKA_ALIGN(addr, 8); /* Align 8-byte */
+ /* Select PKA RAM address for output D - quotient */
+ PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+ PKA4XX_CSR_READ_RETURN(PKA_DPTR_ADDR, &val32);
+ resultD_addr = addr;
+
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_RUN | PKA_FUNCTION_DIV);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < divisor_cnt; i++) {
+ PKA4XX_CSR_READ_RETURN(resultC_addr, &remainder[i]);
+ LPRINTF(LL_INFO, "C remaider : 0x%08x",
+ remainder[i]);
+ resultC_addr += 4;
+ }
+ dividend_cnt -= divisor_cnt;
+ for(i = 0; i <= dividend_cnt /* Use = for + 1 */; ) {
+ PKA4XX_CSR_READ_RETURN(resultD_addr,
+ &quotient[i++]);
+ resultD_addr += 4;
+ }
+
+ return RC_OK;
+ }
+ /* Setting params for Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_DIV;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = divisor_cnt;
+ pka_op->resultD_cnt = dividend_cnt-divisor_cnt+1;
+ pka_op->resultC_addr = remainder;
+ pka_op->resultD_addr = quotient;
+ pka_op->async_pkcp.vecA_cnt = dividend_cnt;
+ pka_op->async_pkcp.vecA = dividend;
+ pka_op->async_pkcp.vecB_cnt = divisor_cnt;
+ pka_op->async_pkcp.vecB = divisor;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_mod(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 dividend_cnt, u32 *dividend,
+ u32 divisor_cnt, u32 *divisor,
+ u32 *remainder)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (dividend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ divisor_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_pkcp_set_vec(dividend_cnt, dividend,
+ divisor_cnt, divisor);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_RUN | PKA_FUNCTION_MOD);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < divisor_cnt; ) {
+ PKA4XX_CSR_READ_RETURN(addr, &remainder[i++]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_MOD;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = divisor_cnt;
+ pka_op->resultC_addr = remainder;
+ pka_op->async_pkcp.vecA_cnt = dividend_cnt;
+ pka_op->async_pkcp.vecA = dividend;
+ pka_op->async_pkcp.vecB_cnt = divisor_cnt;
+ pka_op->async_pkcp.vecB = divisor;
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_add(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 addendA_cnt, u32 *addendA,
+ u32 addendB_cnt, u32 *addendB, u32 *sum)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 result_len;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (addendA_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ addendB_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ result_len = addendA_cnt > addendB_cnt ? (addendA_cnt+1) :
+ (addendB_cnt+1);
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_pkcp_set_vec(addendA_cnt, addendA,
+ addendB_cnt, addendB);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_ADD | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < result_len; ) {
+ PKA4XX_CSR_READ_RETURN(addr, &sum[i++]);
+ addr += 4;
+ }
+ LPRINTF(LL_INFO, "result = %d,addr = 0x%08x",
+ *sum, (unsigned int)addr);
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_ADD;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = result_len;
+ pka_op->resultC_addr = sum;
+ pka_op->async_pkcp.vecA_cnt = addendA_cnt;
+ pka_op->async_pkcp.vecA = addendA;
+ pka_op->async_pkcp.vecB_cnt = addendB_cnt;
+ pka_op->async_pkcp.vecB = addendB;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_sub(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 minuend_cnt, u32 *minuend,
+ u32 subtrahend_cnt, u32 *subtrahend, u32 *difference)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 result_len;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (minuend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ subtrahend_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ result_len = minuend_cnt > subtrahend_cnt ? minuend_cnt :
+ subtrahend_cnt;
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_pkcp_set_vec(minuend_cnt, minuend,
+ subtrahend_cnt, subtrahend);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_SUB | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < result_len; ) {
+ PKA4XX_CSR_READ_RETURN(addr, &difference[i++]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_SUB;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = result_len;
+ pka_op->resultC_addr = difference;
+ pka_op->async_pkcp.vecA_cnt = minuend_cnt;
+ pka_op->async_pkcp.vecA = minuend;
+ pka_op->async_pkcp.vecB_cnt = subtrahend_cnt;
+ pka_op->async_pkcp.vecB = subtrahend;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_addsub(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 input_cnt, u32 *addendA,
+ u32 *addendC, u32 *subtrahend, u32 *result)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op * pka_op;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_addsub_set_vec(input_cnt, addendA,
+ subtrahend, addendC);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_ADDSUB | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i <= input_cnt /* Use = for + 1 */; ) {
+ PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_ADDSUB;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultD_cnt = input_cnt+1;
+ pka_op->resultD_addr = result;
+ pka_op->async_pkcp.vecA_cnt = input_cnt;
+ pka_op->async_pkcp.vecA = addendA;
+ pka_op->async_pkcp.vecB_cnt = 0;
+ pka_op->async_pkcp.vecB = subtrahend;
+ pka_op->async_pkcp.vec_addsub_C = addendC;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_rshift(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 input_cnt, u32 *input,
+ u8 shift, u32 *result)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_shift_set_vec(input_cnt, input, shift);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_RSHIFT | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < input_cnt;) {
+ PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_RSHIFT;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = input_cnt;
+ pka_op->resultC_addr = result;
+ pka_op->async_pkcp.vecA_cnt = input_cnt;
+ pka_op->async_pkcp.vecA = input;
+ pka_op->async_pkcp.shift_val = shift;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_lshift(pka4xx_cb cb, void *ctx,
+ u32 *op_id, u32 input_cnt,
+ u32 *input, u8 shift,
+ u32 *result)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op *pka_op;
+ u32 result_len;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ result_len = shift == 0 ? input_cnt : (input_cnt+1);
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_shift_set_vec(input_cnt, input, shift);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_LSHIFT | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < result_len; ) {
+ PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+ addr += 4;
+ }
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_LSHIFT;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = result_len;
+ pka_op->resultC_addr = result;
+ pka_op->async_pkcp.vecA_cnt = input_cnt;
+ pka_op->async_pkcp.vecA = input;
+ pka_op->async_pkcp.shift_val = shift;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_compare(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u32 input1_cnt, u32 *input1,
+ u32 input2_cnt, u32 *input2,
+ int *result)
+{
+ int rc;
+ struct pka4xx_op *pka_op;
+ u32 val;
+
+#ifdef PPR_PKA_DEBUG
+ if (input1_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ input2_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ pka4xx_pkcp_set_vec(input1_cnt, input1, input2_cnt,
+ input2);
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_COMPARE | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ PKA4XX_CSR_READ_RETURN(PKA_COMPARE_ADDR, &val);
+ if (val & PKA_COMPARE_EQUAL)
+ *result = 0;
+ else if (val & PKA_COMPARE_LESSTHAN)
+ *result = -1;
+ else
+ *result = 1;
+ return RC_OK;
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_COMPARE;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = 1;
+ pka_op->resultC_addr = (u32 *)result;
+ pka_op->async_pkcp.vecA_cnt = input1_cnt;
+ pka_op->async_pkcp.vecA = input1;
+ pka_op->async_pkcp.vecB_cnt = input2_cnt;
+ pka_op->async_pkcp.vecB = input2;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
+
+int pka4xx_expmod(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u8 odd_pwr_cnt,
+ u32 base_mod_cnt, u32 *base,
+ u32 *modulus,
+ u32 exponent_cnt, u32 *exponent,
+ u32 *result)
+{
+ int rc;
+ u32 addr; /* Address of PKA RAM */
+ struct pka4xx_op * pka_op;
+ u32 cmd;
+ u32 i;
+
+#ifdef PPR_PKA_DEBUG
+ if (odd_pwr_cnt > 16 || odd_pwr_cnt == 0 ||
+ base_mod_cnt > PKA4XX_VECTOR_MAXSIZE ||
+ exponent_cnt > PKA4XX_VECTOR_MAXSIZE)
+ return RC_INVALID_PARM;
+#endif
+
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ /* Start the operation */
+ if (odd_pwr_cnt == 2) {
+ cmd = PKA_FUNCTION_SEQOP_EXPMOD_ACT2;
+ } else if (odd_pwr_cnt == 8) {
+ cmd = PKA_FUNCTION_SEQOP_EXPMOD_ACT4;
+ } else {
+ PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, odd_pwr_cnt);
+ cmd = PKA_FUNCTION_SEQOP_EXPMOD_VAR;
+ }
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ addr = pka4xx_expmod_set_vec(base_mod_cnt, base, modulus,
+ exponent_cnt, exponent);
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ cmd | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < base_mod_cnt; i++) {
+ PKA4XX_CSR_READ_RETURN(addr, &result[i]);
+ LPRINTF(LL_INFO, "output = 0x%08x ",
+ result[i]);
+ addr += 4;
+ }
+ return RC_OK;
+
+ }
+ /* Asynchronous operation */
+ pka_op->opcode = cmd;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultC_cnt = odd_pwr_cnt; /* Save odd power cnt in here */
+ pka_op->resultD_cnt = base_mod_cnt;
+ pka_op->resultC_addr = NULL;
+ pka_op->resultD_addr = result;
+ pka_op->async_expmod.base = base;
+ pka_op->async_expmod.exp = exponent;
+ pka_op->async_expmod.modulus = modulus;
+ pka_op->async_expmod.base_mod_cnt = base_mod_cnt;
+ pka_op->async_expmod.exp_cnt = exponent_cnt;
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
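+/*
+ * odd_pwr_cnt selects the sequencer routine: 2 and 8 map to the fixed
+ * ACT2/ACT4 variants, while any other value is written to PKA_SHIFT_ADDR
+ * and the variable routine is used.  Presumably more precomputed odd
+ * powers trade PKA RAM for fewer multiplications per exponent window.
+ */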
+
+int pka4xx_expmod_crt(pka4xx_cb cb, void *ctx, u32 *op_id,
+ u8 odd_pwr_cnt,
+ u32 exp_len, u32 *expP, u32 *expQ,
+ u32 mod_inverse_len, u32 *modP, u32 *modQ,
+ u32 *inverseQ, u32 *input,
+ u32 *result)
+{
+ int rc;
+ struct pka4xx_op *pka_op;
+ u32 i;
+ u32 Daddr;
+
+#ifdef PPR_PKA_DEBUG
+ if (exp_len > PKA4XX_VECTOR_MAXSIZE ||
+ mod_inverse_len > PKA4XX_VECTOR_MAXSIZE ||
+ odd_pwr_cnt > 16)
+ return RC_INVALID_PARM;
+#endif
+ if (cb == NULL) {
+ if (pka4xx_pending_op())
+ return RC_EBUSY;
+ pka_op = NULL;
+ } else {
+ pka_op = pka4xx_get_op_item();
+ if (pka_op == NULL)
+ return RC_EBUSY;
+ }
+
+ if (!cb) {
+ Daddr = pka4xx_expmod_crt_set_vec(exp_len, expP, expQ,
+ mod_inverse_len,
+ modP, modQ,
+ inverseQ, input);
+ } else {
+ /* Asynchronous operation */
+ pka_op->opcode = PKA_FUNCTION_SEQOP_EXPMOD_CRT;
+ pka_op->cb = cb;
+ pka_op->ctx = ctx;
+ pka_op->resultD_cnt = mod_inverse_len<<1;
+ pka_op->resultC_cnt = odd_pwr_cnt; /* Use result C cnt for pwr cnt */
+ pka_op->resultD_addr = result;
+ pka_op->async_expmod_crt.expP = expP;
+ pka_op->async_expmod_crt.expQ = expQ;
+ pka_op->async_expmod_crt.modP = modP;
+ pka_op->async_expmod_crt.modQ = modQ;
+ pka_op->async_expmod_crt.inverseQ = inverseQ;
+ pka_op->async_expmod_crt.exp_len = exp_len;
+ pka_op->async_expmod_crt.mod_inverse_len = mod_inverse_len;
+ pka_op->async_expmod_crt.input = input;
+ }
+
+ /* Save callback for asynchronous operation */
+ if (!cb) {
+ /* Start the operation */
+ PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, odd_pwr_cnt);
+ PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+ PKA_FUNCTION_SEQOP_EXPMOD_CRT | PKA_FUNCTION_RUN);
+ rc = pka4xx_wait2complete();
+ if (rc != RC_OK)
+ return rc;
+ for(i = 0; i < (mod_inverse_len<<1); i++) {
+ PKA4XX_CSR_READ_RETURN(Daddr, &result[i]);
+ LPRINTF(LL_INFO, "D addr : 0x%08x val 0x%08x",
+ Daddr, result[i]);
+ Daddr += 4;
+ }
+ return RC_OK;
+ }
+
+ if (op_id)
+ *op_id = pka_op->id;
+ pka4xx_start_op(pka_op, 0);
+
+ return RC_EINPROGRESS;
+}
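As with the plain expmod, cb == NULL selects the synchronous path. A hedged usage sketch with the textbook toy RSA key p = 61, q = 53, e = 17, d = 2753 (so dP = d mod (p-1) = 53, dQ = d mod (q-1) = 49, qInv = q^-1 mod p = 38); the assumption that inverseQ follows the usual PKCS#1 convention (q^-1 mod p) is mine, not stated by the patch:

	/* Recover m = 65 from c = 65^17 mod 3233 = 2790 via CRT */
	static int expmod_crt_example(void)
	{
		u32 dP[1]   = { 53 };
		u32 dQ[1]   = { 49 };
		u32 p[1]    = { 61 };
		u32 q[1]    = { 53 };
		u32 qInv[1] = { 38 };
		u32 c[2]    = { 2790, 0 };	/* input, n-sized (2 limbs) */
		u32 m[2];			/* result, 2 * mod_inverse_len limbs */

		return pka4xx_expmod_crt(NULL, NULL, NULL, 2 /* odd_pwr_cnt */,
					 1, dP, dQ, 1, p, q, qInv, c, m);
	}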
+
+int pka4xx_hw_init(void)
+{
+ int rc;
+ u32 i;
+ int result;
+ u32 prog_addr;
+
+ printk(LL_INFO "Initializing PKA...\n");
+
+ /* Initialize context variable */
+	for (i = 0; i < PKA4XX_PENDING_OP_MAX; i++) {
+		pka_get_ctx()->op[i].id = i + 1;
+ pka_get_ctx()->op[i].opcode = 0;
+ }
+ INIT_LIST_HEAD(&pka_get_ctx()->completed_event_queue);
+
+ /* Load PKA firmware */
+	LPRINTF(LL_INFO, "Loading PKA firmware at PKA RAM addr 0x%08X, "
+		"size (DW): %d...",
+		pka_get_ctx()->csr_paddr,
+		pkt_firmware_sizedw);
+
+ /* Put PKA Sequencer into reset to access firmware area */
+ rc = pka4xx_csr_hw_write32(PKA_SEQ_CTRL_ADDR, PKA_SEQ_CTRL_RESET);
+ if (rc != RC_OK) {
+ LPRINTF(LL_ERR,
+ "Failed to put PKA Sequencer into reset error 0x%08X",
+ rc);
+ return rc;
+ }
+ /* Now, load the firmware */
+ prog_addr = PKA_PROGRAM_ADDR;
+	for (i = 0; i < pkt_firmware_sizedw; i++, prog_addr += 4) {
+ rc = pka4xx_csr_hw_write32(prog_addr, pka_firmware[i]);
+
+ if (rc != RC_OK) {
+ LPRINTF(LL_ERR,
+ "Failed to load PKA firmware error 0x%08X", rc);
+ return rc;
+ }
+ }
+ /* Put PKA Sequencer into normal operation */
+ rc = pka4xx_csr_hw_write32(PKA_SEQ_CTRL_ADDR, 0);
+ if (rc != RC_OK) {
+		LPRINTF(LL_ERR,
+			"Failed to put PKA Sequencer into normal operation "
+			"error 0x%08X", rc);
+ return rc;
+ }
+
+ /* Register for interrupt */
+ tasklet_init(&pka_get_ctx()->tasklet,
+ pka4xx_tasklet_cb, (unsigned long)pka_get_ctx()->op);
+
+	result = request_irq(pka_get_ctx()->irq, pka4xx_irq_handler,
+			     0, "PKA", NULL);
+	if (result != 0) {
+		tasklet_kill(&pka_get_ctx()->tasklet);
+		return result;
+	}
+
+ set_irq_type(pka_get_ctx()->irq, IRQ_TYPE_EDGE_RISING);
+	/* Uncomment the disable_irq() below to run in polling mode only;
+	 * with it commented out, completion interrupts stay enabled.
+	 */
+	/* disable_irq(pka_get_ctx()->irq); */
+
+ return RC_OK;
+}
+
+int pka4xx_hw_deinit(void)
+{
+	disable_irq(pka_get_ctx()->irq);
+	free_irq(pka_get_ctx()->irq, NULL);
+	tasklet_kill(&pka_get_ctx()->tasklet);
+	return RC_OK;
+}
diff --git a/drivers/crypto/pka_4xx_access.c b/drivers/crypto/pka_4xx_access.c
new file mode 100644
index 00000000000..c2452bdc749
--- /dev/null
+++ b/drivers/crypto/pka_4xx_access.c
@@ -0,0 +1,201 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This file provides access to and implementation of the PKA hardware
+ * under Linux: OF device probing, CSR mapping, and the low-level
+ * register accessors used by the rest of the driver.
+ *
+ * @file pka_4xx_access.c
+ *
+ *******************************************************************************
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <crypto/pka_4xx.h>
+#include "pka_4xx_access.h"
+
+#define PKA4XX_VER_STR "0.1"
+
+struct pka4xx_dev {
+ struct pka4xx_ctx ctx;
+ struct resource csr_res;
+ struct resource pka_ram_res;
+};
+
+struct hal_config {
+ struct of_device *ofdev;
+};
+
+static struct pka4xx_dev pka_dev = {
+ .ctx = {
+ 0,
+		{ { 1, PKA4XX_RAM_FREE_SIZE / 4, 0 },
+ { 0, 0, 0 } },
+ 0, 0
+ }
+};
+
+struct pka4xx_ctx *pka_get_ctx(void)
+{
+ return &pka_dev.ctx;
+}
+
+int pka4xx_config_set(struct hal_config *cfg)
+{
+ struct device_node *pka_np = cfg->ofdev->node;
+ int rc = 0;
+
+ rc = of_address_to_resource(pka_np, 0, &pka_dev.csr_res);
+ if (rc) {
+		LPRINTF(LL_ERR, "failed to get CSR address resource");
+ return -ENODEV;
+ }
+ pka_dev.ctx.csr_paddr = pka_dev.csr_res.start;
+ pka_dev.ctx.csr = ioremap(pka_dev.csr_res.start,
+ pka_dev.csr_res.end - pka_dev.csr_res.start + 1);
+
+ if (pka_dev.ctx.csr == NULL) {
+ LPRINTF(LL_ERR,
+ "unable to ioremap 0x%02X_%08X size %d",
+ (u32) (pka_dev.csr_res.start >> 32),
+ (u32) pka_dev.csr_res.start,
+ (u32) (pka_dev.csr_res.end - pka_dev.csr_res.start + 1));
+ return -ENOMEM;
+ }
+
+ pka_dev.ctx.irq = of_irq_to_resource(pka_np, 0, NULL);
+
+ if (pka_dev.ctx.irq == NO_IRQ) {
+ /* Un-map CSR */
+ iounmap(pka_dev.ctx.csr);
+ pka_dev.ctx.csr = NULL;
+ LPRINTF(LL_ERR, "no irq");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int pka4xx_config_clear(void)
+{
+ iounmap(pka_dev.ctx.csr);
+ return 0;
+}
+
+int pka4xx_csr_hw_read32(u32 reg_addr, u32 *data_val)
+{
+ *data_val = in_le32((volatile unsigned __iomem *)
+ (pka_dev.ctx.csr + reg_addr));
+ return 0;
+}
+
+int pka4xx_csr_hw_write32(u32 reg_addr, u32 data_val)
+{
+ out_le32((volatile unsigned __iomem *) (pka_dev.ctx.csr + reg_addr),
+ data_val);
+ return 0;
+}
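These two accessors are the only touch points to the memory-mapped CSR block; the rest of the driver is built on them. As an illustration (a sketch, not the driver's actual pka4xx_wait2complete(), which is defined earlier in pka_4xx.c), a completion poll on the assumption that the sequencer clears PKA_FUNCTION_RUN when it finishes:

	static int pka4xx_poll_done_sketch(void)
	{
		u32 val;
		int timeout = 100000;

		do {
			pka4xx_csr_hw_read32(PKA_FUNCTION_ADDR, &val);
			if (!(val & PKA_FUNCTION_RUN))
				return RC_OK;	/* sequencer idle */
		} while (--timeout);

		return RC_EIO;			/* never completed */
	}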
+
+/**
+ * Setup Driver with platform registration
+ */
+static int __devinit pka4xx_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct hal_config hw_cfg;
+ int rc;
+
+ hw_cfg.ofdev = ofdev;
+ rc = pka4xx_config_set(&hw_cfg);
+ if (rc != 0)
+ return rc;
+
+ printk(KERN_INFO "AMCC 4xx PKA v%s @0x%02X_%08X size %d IRQ %d\n",
+ PKA4XX_VER_STR,
+ (u32) (pka_dev.csr_res.start >> 32),
+ (u32) pka_dev.csr_res.start,
+ (u32) (pka_dev.csr_res.end - pka_dev.csr_res.start + 1),
+ pka_dev.ctx.irq);
+
+ rc = pka4xx_hw_init();
+ if (rc < 0) {
+ LPRINTF(LL_ERR, "failed to initialize PKA");
+ goto err;
+ }
+ printk(KERN_INFO "PKA Driver Successfully Initialized\n");
+ return rc;
+
+err:
+ pka4xx_config_clear();
+ return rc;
+}
+
+static int __devexit pka4xx_remove(struct of_device *dev)
+{
+ pka4xx_hw_deinit();
+ pka4xx_config_clear();
+ return 0;
+}
+
+static struct of_device_id pka4xx_match[] = {
+ { .compatible = "ppc4xx-pka", },
+ { .compatible = "amcc,ppc4xx-pka", },
+ { },
+};
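The match table above claims nodes carrying either compatible string. For illustration, a hypothetical device tree node this driver would bind to (the reg and interrupt values below are invented placeholders; board device trees define the real ones):

	/*
	 *	PKA0: pka@114000 {
	 *		compatible = "amcc,ppc4xx-pka";
	 *		reg = <0x114000 0x4000>;
	 *		interrupt-parent = <&UIC0>;
	 *		interrupts = <0x14 0x1>;
	 *	};
	 */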
+
+static struct of_platform_driver pka4xx_driver = {
+ .name = "ppc4xx-pka",
+ .match_table = pka4xx_match,
+ .probe = pka4xx_probe,
+ .remove = pka4xx_remove,
+};
+
+static int __init mod_init(void)
+{
+ return of_register_platform_driver(&pka4xx_driver);
+}
+
+static void __exit mod_exit(void)
+{
+ of_unregister_platform_driver(&pka4xx_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("AMCC 4xx Public Key Accelerator");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(pka4xx_mul);
+EXPORT_SYMBOL_GPL(pka4xx_div);
+EXPORT_SYMBOL_GPL(pka4xx_mod);
+EXPORT_SYMBOL_GPL(pka4xx_add);
+EXPORT_SYMBOL_GPL(pka4xx_sub);
+EXPORT_SYMBOL_GPL(pka4xx_addsub);
+EXPORT_SYMBOL_GPL(pka4xx_rshift);
+EXPORT_SYMBOL_GPL(pka4xx_lshift);
+EXPORT_SYMBOL_GPL(pka4xx_compare);
+EXPORT_SYMBOL_GPL(pka4xx_expmod);
+EXPORT_SYMBOL_GPL(pka4xx_expmod_crt);
+
diff --git a/drivers/crypto/pka_4xx_access.h b/drivers/crypto/pka_4xx_access.h
new file mode 100644
index 00000000000..b7baa664f8c
--- /dev/null
+++ b/drivers/crypto/pka_4xx_access.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This header provides the definitions (logging helpers, return codes,
+ * and register access declarations) for the AMCC SoC PKA hardware
+ * under Linux.
+ *
+ * @file pka_4xx_access.h
+ *
+ *******************************************************************************
+ */
+#ifndef __PKA4XX_ACCESS_H__
+#define __PKA4XX_ACCESS_H__
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/errno.h>
+
+#ifndef AHB_BASE_ADDR_LO
+ /* FIXME */
+# define AHB_BASE_ADDR_LO 0
+#endif
+
+/* Debugging Flags */
+
+#ifndef LL_EMERG
+#define LL_EMERG KERN_EMERG
+#define LL_ALERT KERN_ALERT
+#define LL_CRIT KERN_CRIT
+#define LL_ERR KERN_ERR
+#define LL_WARNING KERN_WARNING
+#define LL_NOTICE KERN_NOTICE
+#define LL_INFO KERN_INFO
+#define LL_DEBUG KERN_DEBUG
+#define LL_EXTRADEBUG KERN_DEBUG
+#endif
+
+#define PKA4XX_HDR "PKA4XX: "
+/* #define PKA4XX_DEBUG */
+
+#if !defined(PKA4XX_DEBUG)
+# define LPRINTF(ll, fmt, ...)	do { } while (0)
+#else
+/* No trailing semicolon here: the do { } while (0) wrapper must consume
+ * the caller's semicolon, or LPRINTF() breaks inside if/else bodies.
+ */
+# define LPRINTF(ll, fmt, ...) \
+	do { \
+		printk(ll PKA4XX_HDR fmt "\n", ##__VA_ARGS__); \
+	} while (0)
+#endif
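A quick illustration of why the wrapper matters (example usage, not part of the patch): with a trailing semicolon inside the macro, the extra `;` would terminate the if statement early and the else below would fail to parse. With the fixed form above, this compiles as intended:

	if (rc != RC_OK)
		LPRINTF(LL_ERR, "PKA operation failed, rc=%d", rc);
	else
		LPRINTF(LL_DEBUG, "PKA operation complete");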
+
+#ifndef RC_OK
+ #define RC_OK 0
+ #define RC_INVALID_PARM -EINVAL
+ #define RC_NODEV -ENODEV
+ #define RC_NO_IMPLEMENTATION -ENOSYS
+ #define RC_ENOMEM -ENOMEM
+ #define RC_EINPROGRESS -EINPROGRESS
+ #define RC_EALREADY -EALREADY
+ #define RC_EBUSY -EBUSY
+ #define RC_EIO -EIO
+
+	/* Error code base specific to AMCC */
+ #define RC_ERROR_BASE 5000
+ #define RC_HWERROR -(RC_ERROR_BASE+0)
+ #define RC_FATAL -(RC_ERROR_BASE+1)
+#endif /* RC_OK */
+
+#ifndef ASSERT
+ #define ASSERT(x)
+#endif /* ASSERT */
+
+#endif
diff --git a/drivers/crypto/pka_4xx_firmware.h b/drivers/crypto/pka_4xx_firmware.h
new file mode 100644
index 00000000000..64f79dada4b
--- /dev/null
+++ b/drivers/crypto/pka_4xx_firmware.h
@@ -0,0 +1,515 @@
+/*
+// ASMSEQ Generic Sequencer Assembler V1.15
+// EIP28 sequencer firmware including CRT V1.0
+// Written by KLa/AVe, October 2005
+//==========================================
+*/
+#define PKA_FIRMWARE_1_3_SIZEDW 506
+const unsigned int pka_firmware_1_3[PKA_FIRMWARE_1_3_SIZEDW] =
+ { 0x007001C9,
+ 0x008C0200,
+ 0x007000C9,
+ 0x0068401C,
+ 0x00210008,
+ 0x00601FEE,
+ 0x003CA000,
+ 0x00C80011,
+ 0x00210002,
+ 0x003C6000,
+ 0x00C80011,
+ 0x00694018,
+ 0x003C2000,
+ 0x00C80011,
+ 0x003C7000,
+ 0x00CC00A3,
+ 0x002C0001,
+ 0x0070001D,
+ 0x00611FEC,
+ 0x006A4010,
+ 0x006B4014,
+ 0x00621FE6,
+ 0x00631FE2,
+ 0x00684000,
+ 0x00694004,
+ 0x00601FE4,
+ 0x00611FE0,
+ 0x006A4008,
+ 0x006B400C,
+ 0x00621FE8,
+ 0x00631FEA,
+ 0x00C800A8,
+ 0x00601FD2,
+ 0x00611FD0,
+ 0x00631FD6,
+ 0x00691FE2,
+ 0x00621FD4,
+ 0x00004000,
+ 0x00240002,
+ 0x0034FFFE,
+ 0x00601FDC,
+ 0x00054000,
+ 0x006A1FD6,
+ 0x00614010,
+ 0x00624000,
+ 0x00060000,
+ 0x00621FD8,
+ 0x00060000,
+ 0x00624008,
+ 0x00621FDA,
+ 0x00218200,
+ 0x008701AD,
+ 0x006B1FD0,
+ 0x00060000,
+ 0x00070000,
+ 0x00634004,
+ 0x00624008,
+ 0x008701AD,
+ 0x00020000,
+ 0x00681FD6,
+ 0x00691FDA,
+ 0x00068000,
+ 0x008701B9,
+ 0x00681FDC,
+ 0x006A1FD0,
+ 0x00691FD2,
+ 0x006B1FE6,
+ 0x00060000,
+ 0x00270001,
+ 0x0037FFFE,
+ 0x0005C000,
+ 0x00611FE4,
+ 0x00681FD8,
+ 0x00621FE0,
+ 0x00601FE8,
+ 0x00601FEA,
+ 0x008500A7,
+ 0x006A1FDC,
+ 0x00691FD6,
+ 0x00681FDA,
+ 0x008701B9,
+ 0x00691FD8,
+ 0x00681FD6,
+ 0x00068000,
+ 0x008701B9,
+ 0x00691FD2,
+ 0x006A1FD0,
+ 0x00611FE4,
+ 0x00621FE0,
+ 0x008500A7,
+ 0x00681FD6,
+ 0x006A1FD8,
+ 0x00691FE2,
+ 0x00604004,
+ 0x00624000,
+ 0x00624008,
+ 0x00044000,
+ 0x0030FFFE,
+ 0x00500000,
+ 0x00500000,
+ 0x00064000,
+ 0x0032FFFE,
+ 0x00520000,
+ 0x00520000,
+ 0x00250001,
+ 0x00614010,
+ 0x00614014,
+ 0x00218020,
+ 0x008701AD,
+ 0x00681FD0,
+ 0x00691FE2,
+ 0x00604004,
+ 0x00614010,
+ 0x00614014,
+ 0x00250001,
+ 0x002A0002,
+ 0x008C0400,
+ 0x004B8000,
+ 0x006A1FD8,
+ 0x003FFFFF,
+ 0x0015C000,
+ 0x00064000,
+ 0x00624008,
+ 0x00218010,
+ 0x008701AD,
+ 0x00691FD4,
+ 0x006A1FDA,
+ 0x00614004,
+ 0x00624008,
+ 0x00218001,
+ 0x008701AD,
+ 0x00624000,
+ 0x00604004,
+ 0x00691FD8,
+ 0x006B1FE2,
+ 0x00614008,
+ 0x0007C000,
+ 0x00634010,
+ 0x00218200,
+ 0x008701AD,
+ 0x00624008,
+ 0x00691FD8,
+ 0x006B1FDC,
+ 0x00614000,
+ 0x00070000,
+ 0x00681FE2,
+ 0x00634004,
+ 0x00604010,
+ 0x00218001,
+ 0x008701AD,
+ 0x00624004,
+ 0x006A1FD6,
+ 0x00040000,
+ 0x00624000,
+ 0x00624008,
+ 0x00604014,
+ 0x00218018,
+ 0x008701AD,
+ 0x00691FD4,
+ 0x00621FEA,
+ 0x00611FE8,
+ 0x00200001,
+ 0x008001E9,
+ 0x007000C8,
+ 0x00200013,
+ 0x008C0400,
+ 0x008001F7,
+ 0x00611FDE,
+ 0x007000C8,
+ 0x00691FE0,
+ 0x006A1FE2,
+ 0x00004000,
+ 0x008701B9,
+ 0x00020000,
+ 0x0030FFFE,
+ 0x00490000,
+ 0x00310001,
+ 0x00200003,
+ 0x00D401DA,
+ 0x00200007,
+ 0x008C0400,
+ 0x006B4024,
+ 0x000B8000,
+ 0x00C801DA,
+ 0x0006C000,
+ 0x00270002,
+ 0x00631FE2,
+ 0x00681FEA,
+ 0x002E0001,
+ 0x0004C000,
+ 0x002C0001,
+ 0x00601FF8,
+ 0x0032FFFE,
+ 0x00520000,
+ 0x00520000,
+ 0x00691FE4,
+ 0x006A1FE6,
+ 0x00004000,
+ 0x008701B9,
+ 0x00681FE0,
+ 0x00210010,
+ 0x00220000,
+ 0x0030FFFE,
+ 0x00480000,
+ 0x00230001,
+ 0x00330001,
+ 0x00D400D3,
+ 0x0033FFFF,
+ 0x00070000,
+ 0x00330001,
+ 0x003A0001,
+ 0x00320001,
+ 0x002DFFFF,
+ 0x00CC00CD,
+ 0x00621FFC,
+ 0x00018000,
+ 0x00072000,
+ 0x00681FE0,
+ 0x00631FFE,
+ 0x0030FFFE,
+ 0x00240002,
+ 0x00480000,
+ 0x00072000,
+ 0x006B1FFE,
+ 0x002E0001,
+ 0x00078000,
+ 0x00681FE0,
+ 0x00210010,
+ 0x00220000,
+ 0x0030FFFE,
+ 0x00480000,
+ 0x00330001,
+ 0x00D400ED,
+ 0x0033FFFF,
+ 0x00070000,
+ 0x00330001,
+ 0x003A0001,
+ 0x00320001,
+ 0x002DFFFF,
+ 0x00CC00E7,
+ 0x00621FFE,
+ 0x00691FE4,
+ 0x008C0400,
+ 0x00684024,
+ 0x006A4028,
+ 0x00180000,
+ 0x00D001C6,
+ 0x003E000F,
+ 0x00238000,
+ 0x00138000,
+ 0x00631FF2,
+ 0x00320003,
+ 0x00360002,
+ 0x00030000,
+ 0x0033FFFE,
+ 0x00078000,
+ 0x00631FF4,
+ 0x004BC000,
+ 0x00084000,
+ 0x00631FF0,
+ 0x0030FFFE,
+ 0x00048000,
+ 0x00601FF6,
+ 0x0018C000,
+ 0x003C0001,
+ 0x00C801D1,
+ 0x00681FF8,
+ 0x006A1FE2,
+ 0x002E0001,
+ 0x00068000,
+ 0x008701B1,
+ 0x006A1FE2,
+ 0x00681FF8,
+ 0x00691FE8,
+ 0x00048000,
+ 0x002A0001,
+ 0x008701B9,
+ 0x00691FE0,
+ 0x00681FF8,
+ 0x00614004,
+ 0x00691FE2,
+ 0x00604000,
+ 0x00290001,
+ 0x00614014,
+ 0x00054000,
+ 0x00250001,
+ 0x006A1FF8,
+ 0x00614010,
+ 0x00064000,
+ 0x00260003,
+ 0x00624008,
+ 0x00218200,
+ 0x008701AD,
+ 0x006B1FEC,
+ 0x00018000,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x002B0002,
+ 0x00D0013F,
+ 0x00631FFA,
+ 0x008701B9,
+ 0x00010000,
+ 0x0087019A,
+ 0x00681FF8,
+ 0x00260001,
+ 0x00068000,
+ 0x00048000,
+ 0x00010000,
+ 0x006A1FE2,
+ 0x00240001,
+ 0x00048000,
+ 0x008701B9,
+ 0x00691FEA,
+ 0x0087019A,
+ 0x006B1FFA,
+ 0x002B0001,
+ 0x00631FFA,
+ 0x00D40133,
+ 0x00681FEA,
+ 0x00691FF8,
+ 0x00058000,
+ 0x00058000,
+ 0x00250002,
+ 0x008701B9,
+ 0x00200000,
+ 0x0080017A,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x00200000,
+ 0x00040000,
+ 0x006B1FF0,
+ 0x00691FF2,
+ 0x00174000,
+ 0x00C80150,
+ 0x00240001,
+ 0x00691FEC,
+ 0x00084000,
+ 0x000C4000,
+ 0x00D0017A,
+ 0x00601FF0,
+ 0x00300001,
+ 0x00D40155,
+ 0x00601FFA,
+ 0x00347FFF,
+ 0x00C80161,
+ 0x00601FFA,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x00681FFA,
+ 0x00300001,
+ 0x00800158,
+ 0x00681FF0,
+ 0x00300001,
+ 0x00D40162,
+ 0x00347FFF,
+ 0x00240003,
+ 0x00691FE2,
+ 0x00250001,
+ 0x00072000,
+ 0x00691FEA,
+ 0x00058000,
+ 0x006A1FE2,
+ 0x00681FEA,
+ 0x0087019A,
+ 0x00681FF0,
+ 0x00300001,
+ 0x00D00176,
+ 0x00601FF0,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x0080016E,
+ 0x006B1FF4,
+ 0x004BC000,
+ 0x00631FF0,
+ 0x00200000,
+ 0x00691FF2,
+ 0x00310001,
+ 0x00D40189,
+ 0x006B1FF6,
+ 0x002B0002,
+ 0x00D40183,
+ 0x00140000,
+ 0x00C8018D,
+ 0x00800154,
+ 0x00631FF6,
+ 0x006B1FF4,
+ 0x002B0002,
+ 0x00631FF4,
+ 0x004BC000,
+ 0x00631FF0,
+ 0x00611FF2,
+ 0x00140000,
+ 0x00CC014A,
+ 0x00800146,
+ 0x00681FF8,
+ 0x00691FEA,
+ 0x008701B9,
+ 0x00048000,
+ 0x008701B1,
+ 0x00681FEA,
+ 0x00691FF8,
+ 0x006A1FE2,
+ 0x008701A5,
+ 0x002EFFFF,
+ 0x00621FE2,
+ 0x00200001,
+ 0x008001DA,
+ 0x00604000,
+ 0x00624010,
+ 0x00614004,
+ 0x00624014,
+ 0x00691FF8,
+ 0x00614008,
+ 0x00218001,
+ 0x008C0480,
+ 0x00BC01BE,
+ 0x0061401C,
+ 0x00691FF8,
+ 0x00614008,
+ 0x00624010,
+ 0x00691FE0,
+ 0x00614000,
+ 0x002107FF,
+ 0x00614004,
+ 0x0060400C,
+ 0x00218004,
+ 0x008C0480,
+ 0x00BC01BE,
+ 0x0061401C,
+ 0x008B2000,
+ 0x00010000,
+ 0x0030FFFE,
+ 0x00500000,
+ 0x00500000,
+ 0x00500000,
+ 0x00500000,
+ 0x00300002,
+ 0x002A0002,
+ 0x00614000,
+ 0x00624010,
+ 0x00604008,
+ 0x00218808,
+ 0x008001AD,
+ 0x00707FC8,
+ 0x0070001D,
+ 0x00200000,
+ 0x00010000,
+ 0x00220001,
+ 0x008701B9,
+ 0x0020000F,
+ 0x008001DA,
+ 0x00681FEA,
+ 0x00691FE2,
+ 0x0030FFFE,
+ 0x00500001,
+ 0x0031FFFF,
+ 0x002DFFFF,
+ 0x00500000,
+ 0x002DFFFF,
+ 0x00CC01CC,
+ 0x00200005,
+ 0x008001DA,
+ 0x00691FE8,
+ 0x00681FEA,
+ 0x001C4000,
+ 0x00C801D9,
+ 0x0070001D,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x008701B9,
+ 0x00200009,
+ 0x00691FEE,
+ 0x0070001F,
+ 0x003D9000,
+ 0x00CC01E0,
+ 0x00691FDE,
+ 0x00890000,
+ 0x00300002,
+ 0x00D001E8,
+ 0x00601FEC,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x00010000,
+ 0x008701B9,
+ 0x00681FEC,
+ 0x0030FFFE,
+ 0x006A1FE4,
+ 0x006B1FE6,
+ 0x00624000,
+ 0x00634010,
+ 0x006A1FE0,
+ 0x006B1FE2,
+ 0x00624004,
+ 0x00634014,
+ 0x006A1FE8,
+ 0x006B1FEE,
+ 0x00624008,
+ 0x00377FFF,
+ 0x008C0400,
+ 0x0063401C,
+ 0x006440C9,
+ 0x00800001,
+ 0x00780003
+};