aboutsummaryrefslogtreecommitdiff
path: root/crypto
diff options
context:
space:
mode:
Diffstat (limited to 'crypto')
-rw-r--r--crypto/Kconfig6
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/async_tx/async_memcpy.c4
-rw-r--r--crypto/async_tx/async_r6recov.c292
-rw-r--r--crypto/async_tx/async_xor.c3
-rw-r--r--crypto/cryptodev.c2309
-rw-r--r--crypto/md5.c17
-rw-r--r--crypto/sha1_generic.c17
-rw-r--r--crypto/sha256_generic.c18
-rw-r--r--crypto/sha512_generic.c23
-rw-r--r--crypto/shash.c19
-rw-r--r--crypto/testmgr.c403
-rw-r--r--crypto/testmgr.h1644
13 files changed, 4713 insertions, 43 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 26b5dd0cb56..f3072993874 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -247,6 +247,12 @@ config CRYPTO_FPU
tristate
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
+config CRYPTO_CRYPTODEV
+ tristate "Cryptodev (/dev/crypto) interface"
+ depends on CRYPTO
+ help
+ Device /dev/crypto gives userspace programs access to
+ kernel crypto algorithms (Sync and Async Support).
comment "Hash modes"
diff --git a/crypto/Makefile b/crypto/Makefile
index 9e8f61908cb..a327f18c8df 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
+obj-$(CONFIG_CRYPTO_CRYPTODEV) += cryptodev.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0ec1fb69d4e..10746a6afd6 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -64,6 +64,9 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
dma_src = dma_map_page(device->dev, src, src_offset, len,
DMA_TO_DEVICE);
+ if (submit->depend_tx)
+ async_tx_quiesce(&submit->depend_tx);
+
tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
len, dma_prep_flags);
}
@@ -71,6 +74,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
async_tx_submit(chan, tx, submit);
+
} else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
diff --git a/crypto/async_tx/async_r6recov.c b/crypto/async_tx/async_r6recov.c
new file mode 100644
index 00000000000..028f57ab191
--- /dev/null
+++ b/crypto/async_tx/async_r6recov.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
+ *
+ * Developed for DENX Software Engineering GmbH
+ *
+ * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
+ *
+ * based on async_xor.c code written by:
+ * Dan Williams <dan.j.williams@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/raid/xor.h>
+#include <linux/async_tx.h>
+
+#include "../drivers/md/raid6.h"
+
+/**
+ * async_r6_dd_recov - attempt to calculate two data misses using dma engines.
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: size of strip
+ * @faila: first failed drive index
+ * @failb: second failed drive index
+ * @ptrs: array of pointers to strips (last two must be p and q, respectively)
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @depend_tx: depends on the result of this transaction.
+ * @cb: function to call when the operation completes
+ * @cb_param: parameter to pass to the callback routine
+ */
+struct dma_async_tx_descriptor *
+async_r6_dd_recov(int disks, size_t bytes, int faila, int failb,
+ struct page **ptrs, enum async_tx_flags flags,
+ struct dma_async_tx_descriptor *depend_tx,
+ dma_async_tx_callback cb, void *cb_param)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *lptrs[disks];
+ unsigned char lcoef[disks]; /* sized by disks: disks-4 would be a zero-length VLA (UB) when disks == 4 */
+ int i = 0, k = 0, fc = -1;
+ uint8_t bc[2];
+ dma_async_tx_callback lcb = NULL;
+ void *lcb_param = NULL;
+
+ /* Assume that failb > faila */
+ if (faila > failb) {
+ fc = faila;
+ faila = failb;
+ failb = fc;
+ }
+
+ /* Try to compute missed data asynchronously. */
+ if (disks == 4) {
+ /*
+ * Pxy and Qxy are zero in this case so we already have
+ * P+Pxy and Q+Qxy in P and Q strips respectively.
+ */
+ tx = depend_tx;
+ lcb = cb;
+ lcb_param = cb_param;
+ goto do_mult;
+ }
+
+ /*
+ * (1) Calculate Qxy and Pxy:
+ * Qxy = A(0)*D(0) + ... + A(n-1)*D(n-1) + A(n+1)*D(n+1) + ... +
+ * A(m-1)*D(m-1) + A(m+1)*D(m+1) + ... + A(disks-1)*D(disks-1),
+ * where n = faila, m = failb.
+ */
+ for (i = 0, k = 0; i < disks - 2; i++) {
+ if (i != faila && i != failb) {
+ lptrs[k] = ptrs[i];
+ lcoef[k] = raid6_gfexp[i];
+ k++;
+ }
+ }
+
+ lptrs[k] = ptrs[faila];
+ lptrs[k+1] = ptrs[failb];
+ tx = async_pq(lptrs, lcoef, 0, k, bytes,
+ ASYNC_TX_PQ_ZERO_P | ASYNC_TX_PQ_ZERO_Q |
+ ASYNC_TX_ASYNC_ONLY, depend_tx, NULL, NULL);
+ if (!tx) {
+ /* Here may go to the synchronous variant */
+ if (flags & ASYNC_TX_ASYNC_ONLY)
+ return NULL;
+ goto ddr_sync;
+ }
+
+ /*
+ * The following operations will 'damage' P/Q strips;
+ * so from now on we are committed to the asynchronous path.
+ */
+
+ /* (2) Calculate Q+Qxy */
+ lptrs[0] = ptrs[failb];
+ lptrs[1] = ptrs[disks-1];
+ lptrs[2] = NULL;
+ tx = async_pq(lptrs, NULL, 0, 1, bytes, ASYNC_TX_DEP_ACK,
+ tx, NULL, NULL);
+
+ /* (3) Calculate P+Pxy */
+ lptrs[0] = ptrs[faila];
+ lptrs[1] = ptrs[disks-2];
+ lptrs[2] = NULL;
+ tx = async_pq(lptrs, NULL, 0, 1, bytes, ASYNC_TX_DEP_ACK,
+ tx, NULL, NULL);
+
+do_mult:
+ /*
+ * (4) Compute (P+Pxy) * Bxy. Compute (Q+Qxy) * Cxy. XOR them and get
+ * faila.
+ * B = (2^(y-x))*((2^(y-x) + {01})^(-1))
+ * C = (2^(-x))*((2^(y-x) + {01})^(-1))
+ * B * [p] + C * [q] -> [failb]
+ */
+ bc[0] = raid6_gfexi[failb-faila];
+ bc[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+
+ lptrs[0] = ptrs[disks - 2];
+ lptrs[1] = ptrs[disks - 1];
+ lptrs[2] = NULL;
+ lptrs[3] = ptrs[failb];
+ tx = async_pq(lptrs, bc, 0, 2, bytes,
+ ASYNC_TX_PQ_ZERO_Q | ASYNC_TX_DEP_ACK,
+ tx, NULL, NULL);
+
+ /* (5) Compute failed Dy using recovered [failb] and P+Pnm in [p] */
+ lptrs[0] = ptrs[disks-2];
+ lptrs[1] = ptrs[failb];
+ lptrs[2] = ptrs[faila];
+ lptrs[3] = NULL;
+ tx = async_pq(lptrs, NULL, 0, 2, bytes,
+ ASYNC_TX_PQ_ZERO_P | ASYNC_TX_DEP_ACK,
+ tx, lcb, lcb_param);
+
+ if (disks == 4)
+ return tx;
+
+ /* (6) Restore the parities back */
+ flags |= ASYNC_TX_DEP_ACK;
+
+ memcpy(lptrs, ptrs, (disks - 2) * sizeof(struct page *));
+ lptrs[disks - 2] = ptrs[disks-2];
+ lptrs[disks - 1] = ptrs[disks-1];
+ return async_gen_syndrome(lptrs, 0, disks - 2, bytes, flags,
+ tx, cb, cb_param);
+
+ddr_sync:
+ {
+ void **sptrs = (void **)lptrs;
+ /*
+ * Failed to compute asynchronously, do it in
+ * synchronous manner
+ */
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&depend_tx);
+
+ i = disks;
+ while (i--)
+ sptrs[i] = kmap(ptrs[i]);
+ raid6_2data_recov(disks, bytes, faila, failb, sptrs);
+ i = disks;
+ while (i--)
+ kunmap(ptrs[i]);
+
+ async_tx_sync_epilog(cb, cb_param);
+ }
+
+ return tx;
+}
+EXPORT_SYMBOL_GPL(async_r6_dd_recov);
+
+/**
+ * async_r6_dp_recov - attempt to calculate one data miss using dma engines.
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: size of strip
+ * @faila: failed drive index
+ * @ptrs: array of pointers to strips (last two must be p and q, respectively)
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @depend_tx: depends on the result of this transaction.
+ * @cb: function to call when the operation completes
+ * @cb_param: parameter to pass to the callback routine
+ */
+struct dma_async_tx_descriptor *
+async_r6_dp_recov(int disks, size_t bytes, int faila, struct page **ptrs,
+ enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
+ dma_async_tx_callback cb, void *cb_param)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *lptrs[disks];
+ unsigned char lcoef[disks-2];
+ int i = 0, k = 0;
+
+ /* Try compute missed data asynchronously. */
+
+ /*
+ * (1) Calculate Qn + Q:
+ * Qn = A(0)*D(0) + .. + A(n-1)*D(n-1) + A(n+1)*D(n+1) + ..,
+ * where n = faila;
+ * then subtract Qn from Q and place result to Pn.
+ */
+ for (i = 0; i < disks - 2; i++) {
+ if (i != faila) {
+ lptrs[k] = ptrs[i];
+ lcoef[k++] = raid6_gfexp[i];
+ }
+ }
+ lptrs[k] = ptrs[disks-1]; /* Q-parity */
+ lcoef[k++] = 1;
+
+ lptrs[k] = NULL;
+ lptrs[k+1] = ptrs[disks-2];
+
+ tx = async_pq(lptrs, lcoef, 0, k, bytes,
+ ASYNC_TX_PQ_ZERO_Q | ASYNC_TX_ASYNC_ONLY,
+ depend_tx, NULL, NULL);
+ if (!tx) {
+ if (flags & ASYNC_TX_ASYNC_ONLY)
+ return NULL;
+ goto dpr_sync;
+ }
+
+ /*
+ * (2) Compute missed Dn:
+ * Dn = (Q + Qn) * [A(n)^(-1)]
+ */
+ lptrs[0] = ptrs[disks-2];
+ lptrs[1] = NULL;
+ lptrs[2] = ptrs[faila];
+ return async_pq(lptrs, (u8 *)&raid6_gfexp[faila ? 255-faila : 0], 0, 1,
+ bytes, ASYNC_TX_DEP_ACK | ASYNC_TX_PQ_ZERO_Q,
+ tx, cb, cb_param);
+
+dpr_sync:
+ {
+ void **sptrs = (void **) lptrs;
+ /*
+ * Failed to compute asynchronously, do it in
+ * synchronous manner
+ */
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&depend_tx);
+
+ i = disks;
+ while (i--)
+ sptrs[i] = kmap(ptrs[i]);
+ raid6_datap_recov(disks, bytes, faila, (void *)sptrs);
+ i = disks;
+ while (i--)
+ kunmap(ptrs[i]);
+
+ async_tx_sync_epilog(cb, cb_param);
+ }
+
+ return tx;
+}
+EXPORT_SYMBOL_GPL(async_r6_dp_recov);
+
+static int __init async_r6recov_init(void)
+{
+ return 0;
+}
+
+static void __exit async_r6recov_exit(void)
+{
+ do { } while (0);
+}
+
+module_init(async_r6recov_init);
+module_exit(async_r6recov_exit);
+
+MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
+MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 079ae8ca590..027b2e8411d 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -49,6 +49,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
/* map the dest bidrectional in case it is re-used as a source */
dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+
for (i = 0; i < src_cnt; i++) {
/* only map the dest once */
if (!src_list[i])
@@ -84,6 +85,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
+
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward progress
* in case they can not provide a descriptor
@@ -104,6 +106,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
}
async_tx_submit(chan, tx, submit);
+
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
diff --git a/crypto/cryptodev.c b/crypto/cryptodev.c
new file mode 100644
index 00000000000..205e13949f5
--- /dev/null
+++ b/crypto/cryptodev.c
@@ -0,0 +1,2309 @@
+/**************************************************************************
+ * Linux CryptoAPI user space interface module
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. Shasi Pulijala <spulijala@amcc.com>
+ * Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * @file cryptodev.c
+ *
+ * This file defines ioctl structures for the Linux CryptoAPI interface. It
+ * provides user space applications accesss into the Linux CryptoAPI
+ * functionalities.
+ *
+ **************************************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/ioctl.h>
+#include <linux/scatterlist.h>
+#include <linux/cryptodev.h>
+#include <linux/aio.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/pka_4xx.h>
+#include <crypto/internal/hash.h>
+#include <linux/proc_fs.h>
+
+/* /dev/crypto is a char block device with major 10 and minor below */
+#define CRYPTODEV_MINOR 70
+
+/* Debug Mode Setting */
+#define CRYPTODEV_DEBUG
+
+/* Version Number */
+#define CRYPTODEV_VER "0.1"
+
+/*Pin Max and Min Sizes*/
+#define PAGE_PIN_MIN_SIZE (8)
+#define PAGE_PIN_MAX_SIZE (48 * 1024)
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "0: normal, 1: verbose, 2: debug");
+
+static int sg_single;
+module_param(sg_single, int, 0644);
+MODULE_PARM_DESC(sg_single, "0: scatter user buffers to page size, "
+ "1: single buffer for user buffer");
+
+static int page_pin_min_size = PAGE_PIN_MIN_SIZE;
+module_param(page_pin_min_size, int, 0644);
+MODULE_PARM_DESC(page_pin_min_size,
+ "min value to decide copy to/from user or pin pages");
+
+static int page_pin_max_size = PAGE_PIN_MAX_SIZE;
+module_param(page_pin_max_size, int, 0644);
+MODULE_PARM_DESC(page_pin_max_size,
+ "max value to decide copy to/from user or pin pages");
+
+#ifdef CRYPTODEV_STATS
+static int enable_stats;
+module_param(enable_stats, int, 0644);
+MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
+#endif
+
+#define PFX "cryptodev: "
+
+#ifdef CRYPTODEV_DEBUG
+#define CD_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+#define CDPRINTK(level, severity, format, a...) \
+ do { \
+ if (level <= debug) \
+ printk(severity PFX "%s[%u]: " format, \
+ current->comm, current->pid, ##a); \
+ } while (0)
+#else
+#define CD_HEXDUMP(b, l)
+#define CDPRINTK(level, severity, format, a...)
+#endif
+
+/* Enable this for PKA Debug statements */
+/* #define CD_PKA_DEBUG */
+
+#define CRYPTO_MODE_NOTSET 0
+#define CRYPTO_MODE_ACIPHER 1
+#define CRYPTO_MODE_AHASH 2
+#define CRYPTO_MODE_AEAD 3
+#define CRYPTO_MODE_PKA_RSA 4
+#define CRYPTO_MODE_PKA_RSA_CRT 5
+
+struct crypto_item_op {
+ struct crypt_op *udata;
+ char *iv;
+ char *assoc;
+ char __user *src_data;
+ char __user *dst_data;
+ u16 src_size;
+};
+
+#define iv_len udata->iv_size
+#define assoc_len udata->assoc_size
+#define eop udata->op
+
+#define tfm_ablkcipher crt_tfm.acipher_tfm
+#define tfm_aead crt_tfm.aead_tfm
+#define tfm_ahash crt_tfm.ahash_tfm
+
+struct pka_rsa_key {
+ u32 *exp;
+ u32 *modulus;
+ u32 exp_cnt;
+ u32 base_mod_cnt;
+};
+
+struct pka_rsa_key_crt {
+ u32 *modP;
+ u32 *modQ;
+ u32 mod_inverse_len;
+ u32 *expP;
+ u32 *expQ;
+ u32 exp_len;
+ u32 *inverseQ;
+};
+
+#define rsa_key pka_key.rsa
+#define rsa_crt_key pka_key.rsa_crt
+struct csession {
+ atomic_t refcnt;
+ int mode; /* See CRYPTO_MODE_XXX */
+ union {
+ struct crypto_ablkcipher *acipher_tfm;
+ struct crypto_ahash *ahash_tfm;
+ struct crypto_aead *aead_tfm;
+ } crt_tfm;
+ int (*destroy)(struct csession *ses_ptr);
+ int (*runop)(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+ int (*getivsize)(struct csession *ses_ptr);
+ int (*setkey)(struct csession *ses_ptr, char *key, int key_size);
+
+ union {
+ struct pka_rsa_key rsa;
+ struct pka_rsa_key_crt rsa_crt;
+ } pka_key;
+};
+
+struct cryptodev_ctx {
+ struct csession *session;
+ struct mutex lock;
+};
+
+#define crypto_completion async.syncio.completion
+#define session_ptr async.aio.ses_ptr
+#define iocbvec async.aio.iocb
+#define nopin_data async.aio.data
+#define aio_enc async.aio.enc
+#define aio_dst_data async.aio.dst_data
+#define aio_size_data async.aio.data_size
+
+struct async_result {
+ struct list_head next; /* Pending AIO requests ready for read */
+ int nr_spages;
+ int nr_dpages;
+ struct page **spages;
+ struct page **dpages;
+
+ char *null_buf;
+ void *udata;
+ atomic_t opcnt;
+
+ union {
+ struct {
+ struct csession *ses_ptr;
+ struct kiocb *iocb;
+ char *data;
+ char __user *dst_data;
+ int enc;
+ size_t data_size;
+ } aio;
+ struct {
+ struct completion completion;
+ } syncio;
+ } async;
+ int err;
+};
+
+static int cryptodev_run_acipher(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_ahash(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_aead(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static void cryptodev_async_aio_complete(struct crypto_async_request *req,
+ int err);
+static void cryptodev_async_aio_pka_complete(void *ctx, int err);
+/**
+ * Synchronous handling Routine
+ *
+ */
+static void cryptodev_destroy_res(struct async_result *result)
+{
+ if (result->null_buf)
+ kfree(result->null_buf);
+ if (result->udata)
+ kfree(result->udata);
+ kfree(result);
+}
+
+static void cryptodev_destroy_session(struct csession *ses_ptr)
+{
+ if (ses_ptr->destroy)
+ ses_ptr->destroy(ses_ptr);
+ kfree(ses_ptr);
+}
+
+void cryptodev_release_pages(struct page **pages, int nr_pages)
+{
+ int x;
+ struct page *mpage;
+
+ for (x = 0; x < nr_pages; x++) {
+ mpage = pages[x];
+ SetPageDirty(mpage);
+ page_cache_release(mpage);
+ }
+}
+
+static void cryptodev_sync_complete(struct crypto_async_request *req,
+ int err)
+{
+ struct async_result *res;
+
+ CDPRINTK(2, KERN_INFO, "Synchronous call-back Called\n");
+
+ res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ if (atomic_dec_and_test(&res->opcnt)) {
+ cryptodev_release_pages(res->dpages, res->nr_dpages);
+ cryptodev_release_pages(res->spages, res->nr_spages);
+ cryptodev_destroy_res(res);
+ return;
+ }
+ complete(&(res->crypto_completion));
+}
+
+/**
+ * Destroy Alg Sessions
+ *
+ */
+int cryptodev_destroy_ablkcipher_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "ABLKCIPHER sid %p deleting\n", ses_ptr);
+ crypto_free_ablkcipher(ses_ptr->tfm_ablkcipher);
+ return 0;
+}
+
+int cryptodev_destroy_ahash_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "AHASH sid %p deleting\n", ses_ptr);
+ crypto_free_ahash(ses_ptr->tfm_ahash);
+ return 0;
+}
+
+int cryptodev_destroy_aead_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "AEAD sid %p deleting\n", ses_ptr);
+ crypto_free_aead(ses_ptr->tfm_aead);
+ return 0;
+}
+
+int cryptodev_destroy_pka_rsa(struct csession *ses_ptr)
+{
+ if (ses_ptr->rsa_key.exp)
+ kfree(ses_ptr->rsa_key.exp);
+
+ CDPRINTK(1, KERN_INFO, "PKA RSA sid %p deleting\n", ses_ptr);
+
+ return 0;
+}
+
+int cryptodev_destroy_pka_rsa_crt(struct csession *ses_ptr)
+{
+ if (ses_ptr->rsa_crt_key.modP)
+ kfree(ses_ptr->rsa_crt_key.modP);
+
+ CDPRINTK(1, KERN_INFO, "PKA RSA CRT sid %p deleting\n", ses_ptr);
+ return 0;
+}
+/**
+ * ivsize return functions
+ *
+ */
+int cryptodev_ablkcipher_getivsize(struct csession *ses_ptr)
+{
+ return crypto_ablkcipher_ivsize(ses_ptr->tfm_ablkcipher);
+}
+
+int cryptodev_aead_getivsize(struct csession *ses_ptr)
+{
+ return crypto_aead_ivsize(ses_ptr->tfm_aead);
+}
+
+int cryptodev_ahash_getivsize(struct csession *ses_ptr)
+{
+ return 0;
+}
+
+/**
+ * setkey functions
+ *
+ */
+int cryptodev_ablkcipher_setkey(struct csession *ses_ptr, char *key,
+ int key_size)
+{
+ int ret;
+
+ ret = crypto_ablkcipher_setkey(ses_ptr->tfm_ablkcipher,
+ key, key_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key for %d: flags=0x%X\n", key_size*8,
+ crypto_ablkcipher_get_flags(ses_ptr->tfm_ablkcipher));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int cryptodev_aead_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret;
+
+ ret = crypto_aead_setkey(ses_ptr->tfm_aead, key, key_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %d: flags=0x%X\n",
+ key_size * 8,
+ crypto_aead_get_flags(ses_ptr->tfm_aead));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int cryptodev_ahash_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret = 0;
+
+ if (!key_size)
+ return ret;
+ ret = crypto_ahash_setkey(ses_ptr->tfm_ahash, key, key_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %d: flags=0x%X\n"
+ "(see CRYPTO_TFM_RES_* in "
+ "<linux/crypto.h> for details)\n",
+ key_size * 8,
+ crypto_ahash_get_flags(ses_ptr->tfm_ahash));
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * Routine for creating a session for AEAD type algorithm
+ *
+ */
+struct csession *create_session_aead(struct crypto_aead *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, keyp, sop->key_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size * 8,
+ crypto_aead_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, sop->icv_size);
+ if (ret) {
+ printk(KERN_ERR "failed to set authsize = %u\n", sop->icv_size);
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_aead;
+ }
+ ses_new->tfm_aead = tfm;
+ ses_new->mode = CRYPTO_MODE_AEAD;
+ ses_new->destroy = cryptodev_destroy_aead_tfm;
+ ses_new->runop = cryptodev_run_aead;
+ ses_new->getivsize = cryptodev_aead_getivsize;
+ ses_new->setkey = cryptodev_aead_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AEAD sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_aead:
+ crypto_free_aead(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Routine for creating a session for HASH type algorithm
+ *
+ */
+struct csession *create_session_ahash(struct crypto_ahash *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_ahash_clear_flags(tfm, ~0);
+
+ /* Copy the key(hmac) from user and set to TFM. */
+ if (sop->hmackey_size) {
+ ret = crypto_ahash_setkey(tfm, keyp, sop->hmackey_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: "
+ "flags=0x%X\n"
+ "(see CRYPTO_TFM_RES_* in "
+ "<linux/crypto.h> for details)\n",
+ alg_name, sop->hmackey_size * 8,
+ crypto_ahash_get_flags(tfm));
+
+ ret = -EINVAL;
+ goto free_ahash;
+ }
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_ahash;
+ }
+ ses_new->tfm_ahash = tfm;
+ ses_new->mode = CRYPTO_MODE_AHASH;
+ ses_new->destroy = cryptodev_destroy_ahash_tfm;
+ ses_new->runop = cryptodev_run_ahash;
+ ses_new->getivsize = cryptodev_ahash_getivsize;
+ ses_new->setkey = cryptodev_ahash_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AHASH sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_ahash:
+ crypto_free_ahash(tfm);
+ return ERR_PTR(ret);
+}
+
+int set_session_pka_rsa(struct csession *ses, struct pka_op op)
+{
+ void *tmp;
+ u32 *ivec_tmp;
+ int i, ret;
+ int size;
+
+ /* Expecting two vectors Exp and Modules */
+ if (op.num_iovecs != 2) {
+ printk(KERN_ERR "Expecting two vectors, Exp and Modules\n");
+ return -EINVAL;
+ }
+
+ size = op.iov[0].iov_len + op.iov[1].iov_len;
+ ses->rsa_key.exp_cnt = op.iov[0].iov_len;
+ ses->rsa_key.base_mod_cnt = op.iov[1].iov_len;
+
+ if ((ses->rsa_key.base_mod_cnt <=1) ||
+ (ses->rsa_key.base_mod_cnt > PKA4XX_VECTOR_MAXSIZE) ||
+ (ses->rsa_key.exp_cnt <= 0) ||
+ (ses->rsa_key.exp_cnt > PKA4XX_VECTOR_MAXSIZE)) {
+ /* Rules for EXP and MOD values
+ * 0 < Exp_len <= Max_Len
+ * 1 < Mod_len <= Max_Len
+ * Mod value > 2^32
+ */
+ printk(KERN_ERR "PKA RSA Exp or Mod sizes incorrect\n"
+ " 0 < Exp_len <= 256 \n"
+ " 1 < Mod_len <= 256 \n"
+ " Mod value > 2^32 \n"
+ " Exp_len = %d, Mod_len = %d\n",
+ ses->rsa_key.exp_cnt,
+ ses->rsa_key.base_mod_cnt);
+ return -EINVAL;
+ }
+
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ivec_tmp = tmp;
+
+ for ( i = 0; i < op.num_iovecs ; i++) {
+ if (copy_from_user(ivec_tmp, op.iov[i].iov_base,
+ (op.iov[i].iov_len * 4))) {
+ printk(KERN_ERR "copy_from user of "
+ "PKA Key data failed \n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ivec_tmp += op.iov[i].iov_len;
+ }
+
+ ses->rsa_key.exp = tmp;
+ ses->rsa_key.modulus = ses->rsa_key.exp + ses->rsa_key.exp_cnt;
+
+#ifdef CD_PKA_DEBUG
+ printk(KERN_INFO "Dumping EXP and Mod values \n");
+ CD_HEXDUMP(ses->rsa_key.exp, ses->rsa_key.exp_cnt * 4);
+ CD_HEXDUMP(ses->rsa_key.modulus, ses->rsa_key.base_mod_cnt * 4);
+#endif
+ ses->destroy = cryptodev_destroy_pka_rsa;
+ return 0;
+out:
+ kfree(tmp);
+ return ret;
+}
+
+int set_session_pka_rsa_crt(struct csession *ses, struct pka_op op)
+{
+ void *tmp;
+ u32 *ivec_tmp;
+ int i, ret;
+ u32 size;
+
+ /* Expecting 5 vectors: ExpP, ExpQ, Modp, ModQ, InvQ */
+ if (op.num_iovecs != 5)
+ return -EINVAL;
+
+ ses->rsa_crt_key.mod_inverse_len = op.iov[0].iov_len;
+ ses->rsa_crt_key.exp_len = op.iov[2].iov_len;
+ if ((ses->rsa_crt_key.mod_inverse_len <= 1) ||
+ (ses->rsa_crt_key.mod_inverse_len > PKA4XX_VECTOR_MAXSIZE) ||
+ (ses->rsa_crt_key.exp_len <= 0) ||
+ (ses->rsa_crt_key.exp_len > PKA4XX_VECTOR_MAXSIZE)) {
+ /* Rules for EXP and MOD values
+ * 0 < Exp_len <= Max_Len
+ * 1 < Mod_len <= Max_Len
+ * ModP, ModQ values > 2^32
+ */
+ printk(KERN_ERR "PKA CRT RSA Exp or Mod sizes incorrect\n"
+ " 0 < Exp_len <= 256 \n"
+ " 1 < Mod_len <= 256 \n"
+ " ModP, ModQ values > 2^32 \n"
+ " Exp_len = %d, Mod_len = %d\n",
+ ses->rsa_crt_key.exp_len,
+ ses->rsa_crt_key.mod_inverse_len);
+ return -EINVAL;
+ }
+ size = (op.iov[0].iov_len * 3 * sizeof(u32)) +
+ (op.iov[2].iov_len * 2 * sizeof(u32));
+
+ tmp = kzalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ ivec_tmp = tmp;
+
+ for ( i = 0; i < op.num_iovecs ; i++) {
+ if (copy_from_user(ivec_tmp, op.iov[i].iov_base, (op.iov[i].iov_len * 4))) {
+ printk(KERN_ERR PFX "copy_from_user failed \n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ivec_tmp += op.iov[i].iov_len;
+ }
+ ses->rsa_crt_key.modP = tmp;
+ ses->rsa_crt_key.modQ = ses->rsa_crt_key.modP +
+ ses->rsa_crt_key.mod_inverse_len;
+ ses->rsa_crt_key.expP = ses->rsa_crt_key.modQ +
+ ses->rsa_crt_key.mod_inverse_len;
+ ses->rsa_crt_key.expQ = ses->rsa_crt_key.expP +
+ ses->rsa_crt_key.exp_len;
+ ses->rsa_crt_key.inverseQ = ses->rsa_crt_key.expQ +
+ ses->rsa_crt_key.exp_len;
+#ifdef CD_PKA_DEBUG
+ printk(KERN_INFO "ModulusP\n");
+ CD_HEXDUMP(ses->rsa_crt_key.modP, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "ModulusQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.modQ, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "InverseQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.inverseQ, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "ExpP\n");
+ CD_HEXDUMP(ses->rsa_crt_key.expP, ses->rsa_crt_key.exp_len * 4);
+ printk(KERN_INFO "ExpQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.expQ, ses->rsa_crt_key.exp_len * 4);
+#endif
+ ses->destroy = cryptodev_destroy_pka_rsa_crt;
+ return 0;
+out:
+ kfree(tmp);
+ return ret;
+}
+
+struct csession *create_session_pka(void *arg)
+{
+ struct csession *ses_new;
+ struct pka_op op;
+ int ret;
+
+ if (copy_from_user(&op, (void *) arg, sizeof(op))) {
+ printk(KERN_ERR PFX "copy of session data failed\n");
+ ret = -EFAULT;
+ return ERR_PTR(ret);
+ }
+
+ ret = 0;
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+
+ }
+ ses_new->mode = op.pka_alg;
+
+ atomic_set(&ses_new->refcnt, 1);
+ if (op.pka_alg == CRYPTO_MODE_PKA_RSA)
+ ret = set_session_pka_rsa(ses_new, op);
+ else
+ ret = set_session_pka_rsa_crt(ses_new, op);
+ if (ret) {
+ kfree(ses_new);
+ return ERR_PTR(ret);
+ }
+
+ CDPRINTK(1, KERN_INFO, "PKA sid %p alg rsa created\n",
+ ses_new);
+
+ return ses_new;
+}
+/**
+ * Routine for creating a session for CRYPTO block type algorithm
+ *
+ */
+struct csession *create_session_ablkcipher(struct crypto_ablkcipher *tfm,
+ char *alg_name, struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+
+ /* Copy the key from user and set to TFM. */
+ ret = crypto_ablkcipher_setkey(tfm, keyp, sop->key_size);
+
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size*8,
+ crypto_ablkcipher_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ goto free_ablkcipher;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret =