Diffstat (limited to 'crypto/cryptodev.c')
-rw-r--r--	crypto/cryptodev.c | 2309
1 file changed, 2309 insertions(+), 0 deletions(-)
diff --git a/crypto/cryptodev.c b/crypto/cryptodev.c
new file mode 100644
index 00000000000..205e13949f5
--- /dev/null
+++ b/crypto/cryptodev.c
@@ -0,0 +1,2309 @@
+/**************************************************************************
+ * Linux CryptoAPI user space interface module
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. Shasi Pulijala <spulijala@amcc.com>
+ * Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * @file cryptodev.c
+ *
+ * This file implements the ioctl interface to the Linux CryptoAPI. It
+ * gives user space applications access to Linux CryptoAPI
+ * functionality.
+ *
+ **************************************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/ioctl.h>
+#include <linux/scatterlist.h>
+#include <linux/cryptodev.h>
+#include <linux/aio.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/pka_4xx.h>
+#include <crypto/internal/hash.h>
+#include <linux/proc_fs.h>
+
+/* /dev/crypto is a misc (char) device with major 10 and the minor below */
+#define CRYPTODEV_MINOR 70
+
+/* Debug Mode Setting */
+#define CRYPTODEV_DEBUG
+
+/* Version Number */
+#define CRYPTODEV_VER "0.1"
+
+/* Page-pinning min and max sizes */
+#define PAGE_PIN_MIN_SIZE (8)
+#define PAGE_PIN_MAX_SIZE (48 * 1024)
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "0: normal, 1: verbose, 2: debug");
+
+static int sg_single;
+module_param(sg_single, int, 0644);
+MODULE_PARM_DESC(sg_single, "0: scatter user buffers to page size, "
+ "1: single buffer for user buffer");
+
+static int page_pin_min_size = PAGE_PIN_MIN_SIZE;
+module_param(page_pin_min_size, int, 0644);
+MODULE_PARM_DESC(page_pin_min_size,
+ "min value to decide copy to/from user or pin pages");
+
+static int page_pin_max_size = PAGE_PIN_MAX_SIZE;
+module_param(page_pin_max_size, int, 0644);
+MODULE_PARM_DESC(page_pin_max_size,
+ "max value to decide copy to/from user or pin pages");
+
+#ifdef CRYPTODEV_STATS
+static int enable_stats;
+module_param(enable_stats, int, 0644);
+MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
+#endif
+
+#define PFX "cryptodev: "
+
+#ifdef CRYPTODEV_DEBUG
+#define CD_HEXDUMP(b, l) \
+	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+			16, 1, (b), (l), false)
+#define CDPRINTK(level, severity, format, a...) \
+ do { \
+ if (level <= debug) \
+ printk(severity PFX "%s[%u]: " format, \
+ current->comm, current->pid, ##a); \
+ } while (0)
+#else
+#define CD_HEXDUMP(b, l)
+#define CDPRINTK(level, severity, format, a...)
+#endif
+
+/* Enable this for PKA Debug statements */
+/* #define CD_PKA_DEBUG */
+
+#define CRYPTO_MODE_NOTSET 0
+#define CRYPTO_MODE_ACIPHER 1
+#define CRYPTO_MODE_AHASH 2
+#define CRYPTO_MODE_AEAD 3
+#define CRYPTO_MODE_PKA_RSA 4
+#define CRYPTO_MODE_PKA_RSA_CRT 5
+
+struct crypto_item_op {
+ struct crypt_op *udata;
+ char *iv;
+ char *assoc;
+ char __user *src_data;
+ char __user *dst_data;
+ u16 src_size;
+};
+
+#define iv_len udata->iv_size
+#define assoc_len udata->assoc_size
+#define eop udata->op
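+/*
+ * Note: these shorthands expand in member context; e.g. cop->iv_len
+ * reads as cop->udata->iv_size. The tfm_* and async-result aliases
+ * below work the same way.
+ */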
+
+#define tfm_ablkcipher crt_tfm.acipher_tfm
+#define tfm_aead crt_tfm.aead_tfm
+#define tfm_ahash crt_tfm.ahash_tfm
+
+struct pka_rsa_key {
+ u32 *exp;
+ u32 *modulus;
+ u32 exp_cnt;
+ u32 base_mod_cnt;
+};
+
+struct pka_rsa_key_crt {
+ u32 *modP;
+ u32 *modQ;
+ u32 mod_inverse_len;
+ u32 *expP;
+ u32 *expQ;
+ u32 exp_len;
+ u32 *inverseQ;
+};
+
+#define rsa_key pka_key.rsa
+#define rsa_crt_key pka_key.rsa_crt
+struct csession {
+ atomic_t refcnt;
+ int mode; /* See CRYPTO_MODE_XXX */
+ union {
+ struct crypto_ablkcipher *acipher_tfm;
+ struct crypto_ahash *ahash_tfm;
+ struct crypto_aead *aead_tfm;
+ } crt_tfm;
+ int (*destroy)(struct csession *ses_ptr);
+ int (*runop)(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+ int (*getivsize)(struct csession *ses_ptr);
+ int (*setkey)(struct csession *ses_ptr, char *key, int key_size);
+
+ union {
+ struct pka_rsa_key rsa;
+ struct pka_rsa_key_crt rsa_crt;
+ } pka_key;
+};
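+
+/*
+ * Illustrative dispatch sketch: once a session is created, callers go
+ * through the per-session ops without caring about the algorithm
+ * class, roughly
+ *
+ *	ret = ses_ptr->setkey(ses_ptr, key, key_size);
+ *	ret = ses_ptr->runop(ses_ptr, &cop, iocb);
+ *	...
+ *	ses_ptr->destroy(ses_ptr);
+ *
+ * The concrete implementations are selected in the create_session_*
+ * helpers below.
+ */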
+
+struct cryptodev_ctx {
+ struct csession *session;
+ struct mutex lock;
+};
+
+#define crypto_completion async.syncio.completion
+#define session_ptr async.aio.ses_ptr
+#define iocbvec async.aio.iocb
+#define nopin_data async.aio.data
+#define aio_enc async.aio.enc
+#define aio_dst_data async.aio.dst_data
+#define aio_size_data async.aio.data_size
+
+struct async_result {
+ struct list_head next; /* Pending AIO requests ready for read */
+ int nr_spages;
+ int nr_dpages;
+ struct page **spages;
+ struct page **dpages;
+
+ char *null_buf;
+ void *udata;
+ atomic_t opcnt;
+
+ union {
+ struct {
+ struct csession *ses_ptr;
+ struct kiocb *iocb;
+ char *data;
+ char __user *dst_data;
+ int enc;
+ size_t data_size;
+ } aio;
+ struct {
+ struct completion completion;
+ } syncio;
+ } async;
+ int err;
+};
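+
+/*
+ * Lifetime note: opcnt carries one reference for the submitting thread
+ * and one for the completion callback. Both sides call
+ * atomic_dec_and_test(); whichever side drops the count to zero
+ * releases the pinned pages and frees the result (see
+ * cryptodev_sync_complete() below).
+ */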
+
+static int cryptodev_run_acipher(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_ahash(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_aead(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static void cryptodev_async_aio_complete(struct crypto_async_request *req,
+ int err);
+static void cryptodev_async_aio_pka_complete(void *ctx, int err);
+/**
+ * Resource cleanup and synchronous completion routines
+ *
+ */
+static void cryptodev_destroy_res(struct async_result *result)
+{
+	/* kfree() ignores NULL, so the members can be freed unconditionally */
+	kfree(result->null_buf);
+	kfree(result->udata);
+	kfree(result);
+}
+
+static void cryptodev_destroy_session(struct csession *ses_ptr)
+{
+ if (ses_ptr->destroy)
+ ses_ptr->destroy(ses_ptr);
+ kfree(ses_ptr);
+}
+
+void cryptodev_release_pages(struct page **pages, int nr_pages)
+{
+ int x;
+ struct page *mpage;
+
+ for (x = 0; x < nr_pages; x++) {
+ mpage = pages[x];
+ SetPageDirty(mpage);
+ page_cache_release(mpage);
+ }
+}
+
+static void cryptodev_sync_complete(struct crypto_async_request *req,
+ int err)
+{
+ struct async_result *res;
+
+ CDPRINTK(2, KERN_INFO, "Synchrnous call-back Called\n");
+
+ res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ if (atomic_dec_and_test(&res->opcnt)) {
+ cryptodev_release_pages(res->dpages, res->nr_dpages);
+ cryptodev_release_pages(res->spages, res->nr_spages);
+ cryptodev_destroy_res(res);
+ return;
+ }
+ complete(&(res->crypto_completion));
+}
+
+/**
+ * Destroy Alg Sessions
+ *
+ */
+int cryptodev_destroy_ablkcipher_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "ABLKCIPHER sid %p deleting\n", ses_ptr);
+ crypto_free_ablkcipher(ses_ptr->tfm_ablkcipher);
+ return 0;
+}
+
+int cryptodev_destroy_ahash_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "AHASH sid %p deleting\n", ses_ptr);
+ crypto_free_ahash(ses_ptr->tfm_ahash);
+ return 0;
+}
+
+int cryptodev_destroy_aead_tfm(struct csession *ses_ptr)
+{
+ CDPRINTK(1, KERN_INFO, "AEAD sid %p deleting\n", ses_ptr);
+ crypto_free_aead(ses_ptr->tfm_aead);
+ return 0;
+}
+
+int cryptodev_destroy_pka_rsa(struct csession *ses_ptr)
+{
+	kfree(ses_ptr->rsa_key.exp);
+
+ CDPRINTK(1, KERN_INFO, "PKA RSA sid %p deleting\n", ses_ptr);
+
+ return 0;
+}
+
+int cryptodev_destroy_pka_rsa_crt(struct csession *ses_ptr)
+{
+	kfree(ses_ptr->rsa_crt_key.modP);
+
+ CDPRINTK(1, KERN_INFO, "PKA RSA CRT sid %p deleting\n", ses_ptr);
+ return 0;
+}
+
+/**
+ * ivsize return functions
+ *
+ */
+int cryptodev_ablkcipher_getivsize(struct csession *ses_ptr)
+{
+ return crypto_ablkcipher_ivsize(ses_ptr->tfm_ablkcipher);
+}
+
+int cryptodev_aead_getivsize(struct csession *ses_ptr)
+{
+ return crypto_aead_ivsize(ses_ptr->tfm_aead);
+}
+
+int cryptodev_ahash_getivsize(struct csession *ses_ptr)
+{
+ return 0;
+}
+
+/**
+ * setkey functions
+ *
+ */
+int cryptodev_ablkcipher_setkey(struct csession *ses_ptr, char *key,
+ int key_size)
+{
+ int ret;
+
+ ret = crypto_ablkcipher_setkey(ses_ptr->tfm_ablkcipher,
+ key, key_size);
+ if (ret) {
+		printk(KERN_ERR PFX
+		       "failed to set key for %d bits: flags=0x%X\n",
+		       key_size * 8,
+		       crypto_ablkcipher_get_flags(ses_ptr->tfm_ablkcipher));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int cryptodev_aead_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret;
+
+ ret = crypto_aead_setkey(ses_ptr->tfm_aead, key, key_size);
+ if (ret) {
+		printk(KERN_ERR PFX
+		       "failed to set key field for %d bits: flags=0x%X\n",
+		       key_size * 8,
+		       crypto_aead_get_flags(ses_ptr->tfm_aead));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int cryptodev_ahash_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret = 0;
+
+ if (!key_size)
+ return ret;
+ ret = crypto_ahash_setkey(ses_ptr->tfm_ahash, key, key_size);
+ if (ret) {
+		printk(KERN_ERR PFX
+		       "failed to set key field for %d bits: flags=0x%X\n"
+		       "(see CRYPTO_TFM_RES_* in "
+		       "<linux/crypto.h> for details)\n",
+		       key_size * 8,
+		       crypto_ahash_get_flags(ses_ptr->tfm_ahash));
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * Routine for creating a session for AEAD type algorithm
+ *
+ */
+struct csession *create_session_aead(struct crypto_aead *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, keyp, sop->key_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size * 8,
+ crypto_aead_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, sop->icv_size);
+ if (ret) {
+ printk(KERN_ERR "failed to set authsize = %u\n", sop->icv_size);
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_aead;
+ }
+ ses_new->tfm_aead = tfm;
+ ses_new->mode = CRYPTO_MODE_AEAD;
+ ses_new->destroy = cryptodev_destroy_aead_tfm;
+ ses_new->runop = cryptodev_run_aead;
+ ses_new->getivsize = cryptodev_aead_getivsize;
+ ses_new->setkey = cryptodev_aead_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AEAD sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_aead:
+ crypto_free_aead(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Routine for creating a session for HASH type algorithm
+ *
+ */
+struct csession *create_session_ahash(struct crypto_ahash *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_ahash_clear_flags(tfm, ~0);
+
+	/* Set the HMAC key, if one was supplied, on the TFM. */
+ if (sop->hmackey_size) {
+ ret = crypto_ahash_setkey(tfm, keyp, sop->hmackey_size);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: "
+ "flags=0x%X\n"
+ "(see CRYPTO_TFM_RES_* in "
+ "<linux/crypto.h> for details)\n",
+ alg_name, sop->hmackey_size * 8,
+ crypto_ahash_get_flags(tfm));
+
+ ret = -EINVAL;
+ goto free_ahash;
+ }
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_ahash;
+ }
+ ses_new->tfm_ahash = tfm;
+ ses_new->mode = CRYPTO_MODE_AHASH;
+ ses_new->destroy = cryptodev_destroy_ahash_tfm;
+ ses_new->runop = cryptodev_run_ahash;
+ ses_new->getivsize = cryptodev_ahash_getivsize;
+ ses_new->setkey = cryptodev_ahash_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AHASH sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_ahash:
+ crypto_free_ahash(tfm);
+ return ERR_PTR(ret);
+}
+
+int set_session_pka_rsa(struct csession *ses, struct pka_op op)
+{
+ void *tmp;
+ u32 *ivec_tmp;
+ int i, ret;
+ int size;
+
+	/* Expecting exactly two vectors, Exp and Modulus */
+	if (op.num_iovecs != 2) {
+		printk(KERN_ERR "Expecting two vectors, Exp and Modulus\n");
+ return -EINVAL;
+ }
+
+ size = op.iov[0].iov_len + op.iov[1].iov_len;
+ ses->rsa_key.exp_cnt = op.iov[0].iov_len;
+ ses->rsa_key.base_mod_cnt = op.iov[1].iov_len;
+
+	if ((ses->rsa_key.base_mod_cnt <= 1) ||
+ (ses->rsa_key.base_mod_cnt > PKA4XX_VECTOR_MAXSIZE) ||
+ (ses->rsa_key.exp_cnt <= 0) ||
+ (ses->rsa_key.exp_cnt > PKA4XX_VECTOR_MAXSIZE)) {
+ /* Rules for EXP and MOD values
+ * 0 < Exp_len <= Max_Len
+ * 1 < Mod_len <= Max_Len
+ * Mod value > 2^32
+ */
+ printk(KERN_ERR "PKA RSA Exp or Mod sizes incorrect\n"
+ " 0 < Exp_len <= 256 \n"
+ " 1 < Mod_len <= 256 \n"
+ " Mod value > 2^32 \n"
+ " Exp_len = %d, Mod_len = %d\n",
+ ses->rsa_key.exp_cnt,
+ ses->rsa_key.base_mod_cnt);
+ return -EINVAL;
+ }
+
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ivec_tmp = tmp;
+
+	for (i = 0; i < op.num_iovecs; i++) {
+		if (copy_from_user(ivec_tmp, op.iov[i].iov_base,
+				   (op.iov[i].iov_len * 4))) {
+			printk(KERN_ERR "copy_from_user of "
+			       "PKA key data failed\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ivec_tmp += op.iov[i].iov_len;
+ }
+
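+	/*
+	 * Both vectors live in the single allocation made above:
+	 *   tmp: [ exp (exp_cnt words) | modulus (base_mod_cnt words) ]
+	 */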
+ ses->rsa_key.exp = tmp;
+ ses->rsa_key.modulus = ses->rsa_key.exp + ses->rsa_key.exp_cnt;
+
+#ifdef CD_PKA_DEBUG
+ printk(KERN_INFO "Dumping EXP amd Mod values \n");
+ CD_HEXDUMP(ses->rsa_key.exp, ses->rsa_key.exp_cnt * 4);
+ CD_HEXDUMP(ses->rsa_key.modulus, ses->rsa_key.base_mod_cnt * 4);
+#endif
+ ses->destroy = cryptodev_destroy_pka_rsa;
+ return 0;
+out:
+ kfree(tmp);
+ return ret;
+}
+
+int set_session_pka_rsa_crt(struct csession *ses, struct pka_op op)
+{
+ void *tmp;
+ u32 *ivec_tmp;
+ int i, ret;
+ u32 size;
+
+	/* Expecting exactly 5 vectors, in order: ModP, ModQ, ExpP, ExpQ,
+	 * InvQ (the carving below assumes this layout) */
+	if (op.num_iovecs != 5)
+ return -EINVAL;
+
+ ses->rsa_crt_key.mod_inverse_len = op.iov[0].iov_len;
+ ses->rsa_crt_key.exp_len = op.iov[2].iov_len;
+ if ((ses->rsa_crt_key.mod_inverse_len <= 1) ||
+ (ses->rsa_crt_key.mod_inverse_len > PKA4XX_VECTOR_MAXSIZE) ||
+ (ses->rsa_crt_key.exp_len <= 0) ||
+ (ses->rsa_crt_key.exp_len > PKA4XX_VECTOR_MAXSIZE)) {
+ /* Rules for EXP and MOD values
+ * 0 < Exp_len <= Max_Len
+ * 1 < Mod_len <= Max_Len
+ * ModP, ModQ values > 2^32
+ */
+ printk(KERN_ERR "PKA CRT RSA Exp or Mod sizes incorrect\n"
+ " 0 < Exp_len <= 256 \n"
+ " 1 < Mod_len <= 256 \n"
+ " ModP, ModQ values > 2^32 \n"
+ " Exp_len = %d, Mod_len = %d\n",
+ ses->rsa_crt_key.exp_len,
+ ses->rsa_crt_key.mod_inverse_len);
+ return -EINVAL;
+ }
+ size = (op.iov[0].iov_len * 3 * sizeof(u32)) +
+ (op.iov[2].iov_len * 2 * sizeof(u32));
+
+ tmp = kzalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ ivec_tmp = tmp;
+
+	for (i = 0; i < op.num_iovecs; i++) {
+		if (copy_from_user(ivec_tmp, op.iov[i].iov_base,
+				   (op.iov[i].iov_len * 4))) {
+			printk(KERN_ERR PFX "copy_from_user failed\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ivec_tmp += op.iov[i].iov_len;
+ }
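+	/*
+	 * All five vectors live in one allocation, in iov order:
+	 *   [ modP | modQ | expP | expQ | inverseQ ]
+	 * modP, modQ and inverseQ are mod_inverse_len words each;
+	 * expP and expQ are exp_len words each.
+	 */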
+ ses->rsa_crt_key.modP = tmp;
+ ses->rsa_crt_key.modQ = ses->rsa_crt_key.modP +
+ ses->rsa_crt_key.mod_inverse_len;
+ ses->rsa_crt_key.expP = ses->rsa_crt_key.modQ +
+ ses->rsa_crt_key.mod_inverse_len;
+ ses->rsa_crt_key.expQ = ses->rsa_crt_key.expP +
+ ses->rsa_crt_key.exp_len;
+ ses->rsa_crt_key.inverseQ = ses->rsa_crt_key.expQ +
+ ses->rsa_crt_key.exp_len;
+#ifdef CD_PKA_DEBUG
+ printk(KERN_INFO "ModulusP\n");
+ CD_HEXDUMP(ses->rsa_crt_key.modP, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "ModulusQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.modQ, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "InverseQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.inverseQ, ses->rsa_crt_key.mod_inverse_len * 4);
+ printk(KERN_INFO "ExpP\n");
+ CD_HEXDUMP(ses->rsa_crt_key.expP, ses->rsa_crt_key.exp_len * 4);
+ printk(KERN_INFO "ExpQ\n");
+ CD_HEXDUMP(ses->rsa_crt_key.expQ, ses->rsa_crt_key.exp_len * 4);
+#endif
+ ses->destroy = cryptodev_destroy_pka_rsa_crt;
+ return 0;
+out:
+ kfree(tmp);
+ return ret;
+}
+
+struct csession *create_session_pka(void *arg)
+{
+ struct csession *ses_new;
+ struct pka_op op;
+ int ret;
+
+ if (copy_from_user(&op, (void *) arg, sizeof(op))) {
+ printk(KERN_ERR PFX "copy of session data failed\n");
+ ret = -EFAULT;
+ return ERR_PTR(ret);
+ }
+
+ ret = 0;
+	ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+	if (!ses_new) {
+		ret = -ENOMEM;
+		return ERR_PTR(ret);
+	}
+ ses_new->mode = op.pka_alg;
+
+ atomic_set(&ses_new->refcnt, 1);
+ if (op.pka_alg == CRYPTO_MODE_PKA_RSA)
+ ret = set_session_pka_rsa(ses_new, op);
+ else
+ ret = set_session_pka_rsa_crt(ses_new, op);
+ if (ret) {
+ kfree(ses_new);
+ return ERR_PTR(ret);
+ }
+
+ CDPRINTK(1, KERN_INFO, "PKA sid %p alg rsa created\n",
+ ses_new);
+
+ return ses_new;
+}
+
+/**
+ * Routine for creating a session for CRYPTO block type algorithm
+ *
+ */
+struct csession *create_session_ablkcipher(struct crypto_ablkcipher *tfm,
+ char *alg_name, struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+
+	/* Set the cipher key on the TFM. */
+ ret = crypto_ablkcipher_setkey(tfm, keyp, sop->key_size);
+
+ if (ret) {
+ printk(KERN_ERR PFX
+ "failed to set key for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size*8,
+ crypto_ablkcipher_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ goto free_ablkcipher;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_ablkcipher;
+ }
+
+ ses_new->tfm_ablkcipher = tfm;
+ ses_new->mode = CRYPTO_MODE_ACIPHER;
+ ses_new->destroy = cryptodev_destroy_ablkcipher_tfm;
+ ses_new->runop = cryptodev_run_acipher;
+ ses_new->getivsize = cryptodev_ablkcipher_getivsize;
+ ses_new->setkey = cryptodev_ablkcipher_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "ABLCKCIPHER sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_ablkcipher:
+ crypto_free_ablkcipher(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Prepare session for future use
+ *
+ */
+struct csession *cryptodev_create_session(struct session_op *sop,
+ void *session_udata)
+{
+ char *alg_name;
+ char *key;
+ char *hmac_key;
+ struct crypto_ablkcipher *ablkcipher_tfm;
+ struct crypto_aead *aead_tfm;
+ struct crypto_ahash *ahash_tfm;
+ int ret;
+
+ alg_name = (char *) session_udata;
+ key = alg_name + sop->algo_size;
+ hmac_key = key + sop->key_size;
+
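+	/*
+	 * Probe the CryptoAPI by name: try hash first, then block
+	 * cipher, then AEAD. The first allocator that recognizes
+	 * alg_name determines the session type.
+	 */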
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (!IS_ERR(ahash_tfm))
+ return create_session_ahash(ahash_tfm, alg_name, sop,
+ hmac_key);
+ ablkcipher_tfm = crypto_alloc_ablkcipher(alg_name, 0, 0);
+ if (!IS_ERR(ablkcipher_tfm))
+ return create_session_ablkcipher(ablkcipher_tfm,
+ alg_name, sop, key);
+ aead_tfm = crypto_alloc_aead(alg_name, 0, 0);
+ if (!IS_ERR(aead_tfm))
+ return create_session_aead(aead_tfm, alg_name, sop,
+ key);
+
+ printk(KERN_ERR PFX "un-supported algorithm %s\n", alg_name);
+ ret = -EINVAL;
+ return ERR_PTR(ret);
+}
+
+/**
+ * Helper Functions for Page Creation and deletion.
+ *
+ */
+int cryptodev_num_pages(unsigned long data, size_t bufsize)
+{
+ unsigned long first;
+ unsigned long last;
+ int num_pages;
+
+ if (!bufsize)
+ return 1;
+ first = (data & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((data + bufsize - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ num_pages = last - first + 1;
+ return num_pages;
+}
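+
+/*
+ * Worked example (assuming PAGE_SIZE = 4096): a 100-byte buffer at
+ * user address 0x1fc0 starts on page 1 and ends at 0x2023 on page 2,
+ * so num_pages = 2. A zero-length buffer still reports one page so
+ * callers always get a usable scatterlist slot.
+ */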
+
+static int pages_count;
+int cryptodev_set_user_pages(char __user *src, struct scatterlist *sg,
+ struct page **pages, size_t bufsize,
+ int *nr_pages, char **null_buf)
+{
+ unsigned long offset;
+ struct page *page = NULL;
+ int x;
+ int rop;
+ int err;
+
+ if (!src) {
+ *nr_pages = 0;
+ CDPRINTK(1, KERN_INFO, "Case of null buffer\n");
+ *null_buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!*null_buf)
+ return -ENOMEM;
+ sg_init_one(&sg[0], *null_buf, bufsize);
+ return 0;
+ }
+
+ offset = (unsigned long) src & ~PAGE_MASK;
+ if (!pages) {
+ printk(KERN_ERR PFX "pages memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&current->mm->mmap_sem);
+	err = get_user_pages(current, current->mm,
+			((unsigned long) src) & PAGE_MASK,
+			*nr_pages, 1, 0, /* write, force */ pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (err != *nr_pages) {
+		printk(KERN_ERR PFX "pages requested[%d] !="
+			" pages granted[%d]\n", *nr_pages, err);
+		/* drop any pages that did get pinned before failing */
+		if (err > 0)
+			cryptodev_release_pages(pages, err);
+		return err < 0 ? err : -EINVAL;
+	}
+ pages_count += err;
+	if (sg_single) {
+		page = pages[0];
+		CDPRINTK(2, KERN_INFO, "single buffer implementation\n");
+		/* initialize the entry so the list end marker is set */
+		sg_init_table(sg, 1);
+		sg_set_page(&sg[0], page, bufsize, offset);
+		return 0;
+	}
+
+ sg_init_table(sg, *nr_pages);
+ for (x = 0; x < *nr_pages; x++) {
+		page = pages[x];
+ if (!page || IS_ERR(page)) {
+ printk(KERN_ERR PFX "missing page in "
+ "DumpUserPages %d\n", x);
+ return -EFAULT;
+ }
+ sg_set_page(&sg[x], page, bufsize, offset);
+ rop = PAGE_SIZE - sg[x].offset;
+ if (bufsize > rop) {
+ sg[x].length = rop;
+ bufsize = bufsize - rop;
+ }
+ offset = 0;
+ }
+
+ return 0;
+}
+
+void cryptodev_sg_setbuf(unsigned char *data, size_t bufsize,
+ struct scatterlist *sg, int sg_num)
+{
+ int remainder_of_page;
+ int i = 0;
+
+ sg_init_table(sg, sg_num);
+ while (bufsize > 0 && i < sg_num) {
+ sg_set_buf(&sg[i], data, bufsize);
+ remainder_of_page = PAGE_SIZE - sg[i].offset;
+ if (bufsize > remainder_of_page) {
+ /* the buffer was split over multiple pages */
+ sg[i].length = remainder_of_page;
+ bufsize -= remainder_of_page;
+ data += remainder_of_page;
+ } else {
+ bufsize = 0;
+ }
+ i++;
+ }
+}
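+
+/*
+ * Worked example (assuming PAGE_SIZE = 4096): a 6000-byte buffer that
+ * starts 2048 bytes into a page yields sg[0] covering the remaining
+ * 2048 bytes of that page and sg[1] covering the other 3952 bytes at
+ * offset 0 of the next page. sg_num must therefore be at least the
+ * value cryptodev_num_pages() reports for the buffer.
+ */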
+
+/**
+ * Helper Functions for the AEAD mode
+ *
+ */
+static void *aead_alloc_tmp(struct crypto_aead *aead, int sg_size,
+ int ssg_num, int nopin, size_t bufsize)
+{
+ unsigned int len;
+
+ len = sizeof(struct async_result) +
+ (crypto_aead_alignmask(aead) &
+ ~(crypto_tfm_ctx_alignment() - 1));
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+
+ len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
+
+ if (nopin) {
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * ssg_num;
+ len += bufsize;
+
+ return kzalloc(len, GFP_KERNEL);
+ }
+
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * (sg_size);
+
+ len = ALIGN(len, __alignof__(struct page *));
+ len += sizeof(struct page *) * sg_size;
+
+ return kzalloc(len, GFP_KERNEL);
+}
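+
+/*
+ * Sketch of the single allocation made by aead_alloc_tmp(); the
+ * helpers below carve it up with the same alignment rules:
+ *
+ *   nopin:  [ async_result | aead_request + ctx | ssg_num sg entries |
+ *             data buffer of bufsize ]
+ *   pinned: [ async_result | aead_request + ctx | sg entries |
+ *             struct page pointers ] (src and dst regions interleaved
+ *             when the destination differs from the source)
+ */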
+
+static inline struct aead_request *aead_result_req(
+ struct crypto_aead *aead,
+ struct async_result *result)
+{
+ struct aead_request *req;
+
+ req = (struct aead_request *) PTR_ALIGN(
+ (unsigned long) result +
+ sizeof(struct async_result),
+ crypto_tfm_ctx_alignment());
+ aead_request_set_tfm(req, aead);
+ return req;
+}
+
+static inline struct scatterlist *aead_req_ssg(struct crypto_aead *aead,
+ struct aead_request *req)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) (req + 1) +
+ crypto_aead_reqsize(aead),
+ __alignof__(struct scatterlist));
+}
+
+static inline char *aead_ssg_data(struct scatterlist *ssg, int ssg_num)
+{
+ return (char *) ((unsigned long) ssg + sizeof(struct scatterlist)
+ * ssg_num);
+}
+
+static inline struct page **aead_ssg_spages(struct scatterlist *sg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) sg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+static inline struct scatterlist *aead_spages_dsg(struct page **pages,
+ int npages)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) pages +
+ sizeof(struct page *) * npages,
+ __alignof__(struct scatterlist));
+}
+
+static inline struct page **aead_dsg_dpages(struct scatterlist *dsg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) dsg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+int cryptodev_is_pin_pages(int size)
+{
+	return size <= page_pin_min_size || size >= page_pin_max_size;
+}
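+
+/*
+ * With the default module parameters this means buffers of at most 8
+ * bytes or at least 48 KiB are copied through a kernel bounce buffer,
+ * while everything in between has its user pages pinned with
+ * get_user_pages() and mapped into a scatterlist directly.
+ */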
+
+/**
+ * This is the actual aead function that implements
+ * the Combined mode
+ *
+ */
+static int cryptodev_run_aead(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb)
+{
+ void *tmp;
+ char __user *src;
+ char __user *dst;
+ char *data = NULL;
+ struct scatterlist *ssg;
+ struct scatterlist *dsg;
+ struct aead_request *req;
+ struct async_result *result = NULL;
+ size_t bufsize, authsize;
+ int nr_spages, nr_dpages = 0;
+ int ssg_num;
+ int enc, ret, dst_flag, nopin; /*Flags*/
+ struct scatterlist adata;
+
+ /* Setting the Input params */
+ bufsize = cop->src_size;
+ src = cop->src_data;
+ dst = cop->dst_data;
+ dst_flag = src != dst;
+ enc = cop->eop == COP_ENCRYPT ? 1 : 0;
+ authsize = crypto_aead_authsize(ses_ptr->tfm_aead);
+
+ ssg_num = cryptodev_num_pages((unsigned long) src,
+ enc ? bufsize + authsize : bufsize);
+
+ if (cop->eop && cop->eop != COP_ENCRYPT && cop->eop != COP_DECRYPT) {
+ printk(KERN_ERR PFX "sid %p invalid operation op=%u\n",
+ ses_ptr, cop->eop);
+ return -EINVAL;
+ }
+
+ if (bufsize > CRYPTO_MAX_DATA_LEN) {
+ printk(KERN_INFO PFX "Maximum Data Size Exceeded: %d > %d\n",
+ bufsize, CRYPTO_MAX_DATA_LEN);
+ return -E2BIG;
+ }
+
+	/* Skip page pinning when the buffer is too small or too large */
+ nopin = cryptodev_is_pin_pages(bufsize) ? 1 : 0;
+
+ if (dst_flag) {
+ if (nopin) {
+ nr_spages = nr_dpages = 0;
+ } else {
+ nr_spages = cryptodev_num_pages((unsigned long) src,
+ bufsize);
+ nr_dpages = cryptodev_num_pages((unsigned long) dst,
+ enc ? bufsize + authsize :
+ bufsize - authsize);
+ }
+ } else {
+ if (nopin)
+ nr_spages = 0;
+ else
+ nr_spages = cryptodev_num_pages((unsigned long) src,
+ enc ? bufsize + authsize :
+ bufsize);
+ }
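+
+	/*
+	 * Sizing note: on encrypt the destination must hold bufsize plus
+	 * the ICV (authsize is appended); on decrypt the tag is consumed,
+	 * so the destination shrinks by authsize.
+	 */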
+
+ tmp = aead_alloc_tmp(ses_ptr->tfm_aead,
+ dst_flag ? nr_spages + nr_dpages : nr_spages,
+ ssg_num, nopin, bufsize);
+ if (!tmp)
+ return -ENOMEM;
+
+ result = (struct async_result *) tmp;
+ req = aead_result_req(ses_ptr->tfm_aead, result);
+ ssg = aead_req_ssg(ses_ptr->tfm_aead, req);
+
+ if (nopin) {
+ data = aead_ssg_data(ssg, ssg_num);
+		if (src && copy_from_user(data, src, bufsize)) {
+			printk(KERN_ERR PFX
+			       "failed to copy aead "
+			       "cop data from user space\n");
+			kfree(tmp);
+			return -EFAULT;
+		}
+ cryptodev_sg_setbuf(data, enc ? bufsize + authsize : bufsize,
+ ssg, ssg_num);
+ dsg = ssg;
+ } else {
+ result->spages = aead_ssg_spages(ssg, nr_spages);
+
+ if (dst_flag) {
+ dsg = aead_spages_dsg(result->spages, nr_spages);
+ result->dpages = aead_dsg_dpages(dsg, nr_dpages);
+ ret = cryptodev_set_user_pages(src, ssg,
+ result->spages, bufsize,
+ &nr_spages,
+ &result->null_buf);
+
+ if (ret)
+ goto out_tmp;
+ ret = cryptodev_set_user_pages(dst, dsg,
+ result->dpages,
+ enc ? bufsize + authsize :
+ bufsize - authsize,
+ &nr_dpages,
+ &result->null_buf);
+ if (ret)
+ goto out_spages;
+ } else {
+ dsg = ssg;
+ result->dpages = result->spages;
+ ret = cryptodev_set_user_pages(src, ssg,
+ result->spages,
+ enc ? bufsize + authsize :
+ bufsize,
+ &nr_spages,
+ &result->null_buf);
+ if (ret)
+ goto out_tmp;
+ }
+ }
+
+ if (iocb) {
+ result->nr_spages = nr_spages;
+ result->nr_dpages = nr_dpages;
+ result->iocbvec = iocb;
+ result->nopin_data = data;
+ result->session_ptr = ses_ptr;
+ result->udata = (void *)cop->udata;
+ result->aio_enc = cop->eop;
+ result->aio_dst_data = dst;
+ result->aio_size_data = enc ? bufsize + authsize :
+ bufsize - authsize;
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_async_aio_complete,
+ result);
+ } else {
+ atomic_set(&result->opcnt, 2);
+ init_completion(&(result->crypto_completion));
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_sync_complete,
+ result);
+ }
+
+ /* Additional Associated data */
+ sg_init_one(&adata, cop->assoc, cop->assoc_len);
+
+ aead_request_set_crypt(req, ssg, dsg, bufsize, cop->iv);
+ aead_request_set_assoc(req, &adata, cop->assoc_len);
+
+ atomic_inc(&ses_ptr->refcnt);
+
+ if (cop->eop == COP_ENCRYPT)
+ ret = crypto_aead_encrypt(req);
+ else
+ ret = crypto_aead_decrypt(req);
+
+ switch (ret) {
+ case 0:
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ if (iocb) {
+ CDPRINTK(2, KERN_INFO,
+ "Async Call AEAD:Returning Now\n");
+ return -EIOCBQUEUED;
+ }
+ ret = wait_for_completion_interruptible(
+ &result->crypto_completion);
+ if (!ret)
+ ret = result->err;
+ if (!ret) {
+ INIT_COMPLETION(result->crypto_completion);
+ break;
+ }
+ printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+ ses_ptr, -ret);
+ break;
+ default:
+ printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+ ses_ptr, -ret);
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ break;
+ }
+
+	if (nopin && !ret) {
+		if (copy_to_user(dst, data, enc ? bufsize + authsize :
+					    bufsize - authsize)) {
+			printk(KERN_ERR PFX
+			       "failed to copy encrypted data "
+			       "to user space\n");
+			ret = -EFAULT;
+		}
+		CD_HEXDUMP(data, enc ? bufsize + authsize :
+			   bufsize - authsize);
+	}
+
+ /* Check if last reference */
+ if (atomic_dec_and_test(&ses_ptr->refcnt))
+ cryptodev_destroy_session(ses_ptr);
+ if (!iocb) {
+ if (atomic_dec_and_test(&result->opcnt))
+ goto out_dpages; /* cleanup */
+ else
+ return ret;
+ }
+out_dpages:
+ if (dst_flag)
+ cryptodev_release_pages(result->dpages, nr_dpages);
+out_spages:
+ cryptodev_release_pages(result->spages, nr_spages);
+out_tmp:
+ cryptodev_destroy_res(result);
+ return ret;
+}
+
+static int cryptodev_user_pka_op(struct csession *ses_ptr, struct kiocb *iocb,
+ const struct iovec *iov,
+ int nr_segs, int rsa_crt)
+{
+ struct async_result *pka_ctx = NULL;
+ u32 *base;
+ u32 *result;
+ void *tmp;
+ int ret;
+ u32 op_id;
+ u32 base_cnt = iov[0].iov_len;
+ u32 result_len, len;
+ void *cb;
+ void *cb_ctx;
+ char __user *dst_data = iov[1].iov_base;
+
+	if (!rsa_crt) {
+		/* This check applies only to plain RSA */
+ if (base_cnt != ses_ptr->rsa_key.base_mod_cnt) {
+ if (base_cnt > ses_ptr->rsa_key.base_mod_cnt) {
+ printk(KERN_ERR "base count value "
+ "greater than "
+ "modulus count\n");
+ return -EINVAL;
+ } else
+ base_cnt = ses_ptr->rsa_key.base_mod_cnt;
+ }
+ }
+
+ /* Total length = sizeof ctx +
+ * (input vector size + result vector size) * 4)
+ * The input and result vectors sizes are in 32 bit word size */
+ len = (sizeof(struct async_result) + (base_cnt * 4 * nr_segs));
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ pka_ctx = (struct async_result *)tmp;
+ tmp += sizeof(struct async_result);
+
+ /* Copying the input/base buffer */
+ if (copy_from_user(tmp, iov[0].iov_base, (i